From b0c2f9045394211c489c9e7777deef682e6701ba Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Sat, 8 Oct 2022 10:49:52 -0700
Subject: [PATCH] [RISCV] Merge more rv32/rv64 vector intrinsic tests that
 contain the same content.

---
 llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll | 1456 ----------
 .../CodeGen/RISCV/rvv/{vadc-rv64.ll => vadc.ll} | 506 ++--
 llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll | 2800 -------------------
 .../CodeGen/RISCV/rvv/{vand-rv32.ll => vand.ll} | 1038 +++---
 llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll | 2074 --------------
 .../CodeGen/RISCV/rvv/{vdiv-rv32.ll => vdiv.ll} | 862 +++---
 llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll | 2074 --------------
 .../CodeGen/RISCV/rvv/{vdivu-rv32.ll => vdivu.ll} | 862 +++---
 llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll | 2029 --------------
 llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll | 2029 --------------
 llvm/test/CodeGen/RISCV/rvv/vleff.ll | 2639 ++++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll | 1772 ------------
 .../CodeGen/RISCV/rvv/{vmacc-rv64.ll => vmacc.ll} | 660 +++--
 llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll | 1236 ---------
 llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll | 1388 ----------
 .../{vmadc.carry.in-rv32.ll => vmadc.carry.in.ll} | 490 ++--
 .../CodeGen/RISCV/rvv/{vmadc-rv64.ll => vmadc.ll} | 454 ++--
 llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll | 1694 ------------
 .../CodeGen/RISCV/rvv/{vmadd-rv32.ll => vmadd.ll} | 702 ++---
 llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll | 2122 ---------------
 .../CodeGen/RISCV/rvv/{vmax-rv64.ll => vmax.ll} | 814 +++---
 llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll | 2122 ---------------
 .../CodeGen/RISCV/rvv/{vmaxu-rv64.ll => vmaxu.ll} | 814 +++---
 llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll | 2074 --------------
 .../CodeGen/RISCV/rvv/{vmin-rv32.ll => vmin.ll} | 862 +++---
 llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll | 2074 --------------
 .../CodeGen/RISCV/rvv/{vminu-rv32.ll => vminu.ll} | 862 +++---
 llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll | 882 ------
 .../test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll | 1014 -------
 ...{vmsbc.borrow.in-rv32.ll => vmsbc.borrow.in.ll} | 402 +--
 .../CodeGen/RISCV/rvv/{vmsbc-rv32.ll => vmsbc.ll} | 390 +--
 llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll | 2414 -----------------
 .../CodeGen/RISCV/rvv/{vmseq-rv32.ll => vmseq.ll} | 825 +++---
 llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll | 2757 -------------------
 .../CodeGen/RISCV/rvv/{vmsge-rv32.ll => vmsge.ll} | 995 +++---
 llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll | 2769 -------------------
 .../RISCV/rvv/{vmsgeu-rv32.ll => vmsgeu.ll} | 999 +++---
 llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll | 2414 -----------------
 .../CodeGen/RISCV/rvv/{vmsgt-rv32.ll => vmsgt.ll} | 825 +++---
 llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll | 2414 -----------------
 .../RISCV/rvv/{vmsgtu-rv32.ll => vmsgtu.ll} | 825 +++---
 llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll | 2450 -----------------
 .../CodeGen/RISCV/rvv/{vmsle-rv64.ll => vmsle.ll} | 789 +++---
 llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll | 2414 -----------------
 .../RISCV/rvv/{vmsleu-rv32.ll => vmsleu.ll} | 825 +++---
 llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll | 2414 -----------------
 .../CodeGen/RISCV/rvv/{vmslt-rv32.ll => vmslt.ll} | 825 +++---
 llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll | 2414 -----------------
 .../RISCV/rvv/{vmsltu-rv32.ll => vmsltu.ll} | 825 +++---
 llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll | 2450 -----------------
 .../CodeGen/RISCV/rvv/{vmsne-rv64.ll => vmsne.ll} | 789 +++---
 llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll | 1694 ------------
 .../RISCV/rvv/{vnmsac-rv32.ll => vnmsac.ll} | 702 ++---
 llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll | 1694 ------------
 .../RISCV/rvv/{vnmsub-rv32.ll => vnmsub.ll} | 702 ++---
 llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll | 2848 --------------------
 .../test/CodeGen/RISCV/rvv/{vor-rv64.ll => vor.ll} | 990 ++++---
 llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll | 2122 ---------------
 .../CodeGen/RISCV/rvv/{vrem-rv64.ll => vrem.ll} | 814 +++---
 llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll | 2074 --------------
 .../CodeGen/RISCV/rvv/{vremu-rv32.ll => vremu.ll} | 862 +++---
 llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll | 1058 --------
 .../CodeGen/RISCV/rvv/{vsbc-rv32.ll => vsbc.ll} | 390 +--
 llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll | 458 ----
 .../rvv/{vselect-fp-rv32.ll => vselect-fp.ll} | 2 +
 llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll | 827 ------
 .../rvv/{vselect-int-rv64.ll => vselect-int.ll} | 93 +-
 llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll | 2800 -------------------
 .../CodeGen/RISCV/rvv/{vxor-rv32.ll => vxor.ll} | 1038 +++---
 69 files changed, 15451 insertions(+), 80346 deletions(-)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vadc-rv64.ll => vadc.ll} (82%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vand-rv32.ll => vand.ll} (83%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vdiv-rv32.ll => vdiv.ll} (80%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vdivu-rv32.ll => vdivu.ll} (80%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vleff.ll
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vmacc-rv64.ll => vmacc.ll} (80%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vmadc.carry.in-rv32.ll => vmadc.carry.in.ll} (83%)
 rename llvm/test/CodeGen/RISCV/rvv/{vmadc-rv64.ll => vmadc.ll} (79%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vmadd-rv32.ll => vmadd.ll} (80%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vmax-rv64.ll => vmax.ll} (80%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vmaxu-rv64.ll => vmaxu.ll} (80%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vmin-rv32.ll => vmin.ll} (80%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vminu-rv32.ll => vminu.ll} (80%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vmsbc.borrow.in-rv32.ll => vmsbc.borrow.in.ll} (83%)
 rename llvm/test/CodeGen/RISCV/rvv/{vmsbc-rv32.ll => vmsbc.ll} (77%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vmseq-rv32.ll => vmseq.ll} (84%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vmsge-rv32.ll => vmsge.ll} (82%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
 rename llvm/test/CodeGen/RISCV/rvv/{vmsgeu-rv32.ll => vmsgeu.ll} (83%)
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
rename llvm/test/CodeGen/RISCV/rvv/{vmsgt-rv32.ll => vmsgt.ll} (84%) delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll rename llvm/test/CodeGen/RISCV/rvv/{vmsgtu-rv32.ll => vmsgtu.ll} (85%) delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll rename llvm/test/CodeGen/RISCV/rvv/{vmsle-rv64.ll => vmsle.ll} (84%) delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll rename llvm/test/CodeGen/RISCV/rvv/{vmsleu-rv32.ll => vmsleu.ll} (85%) delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll rename llvm/test/CodeGen/RISCV/rvv/{vmslt-rv32.ll => vmslt.ll} (84%) delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll rename llvm/test/CodeGen/RISCV/rvv/{vmsltu-rv32.ll => vmsltu.ll} (85%) delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll rename llvm/test/CodeGen/RISCV/rvv/{vmsne-rv64.ll => vmsne.ll} (84%) delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll rename llvm/test/CodeGen/RISCV/rvv/{vnmsac-rv32.ll => vnmsac.ll} (80%) delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll rename llvm/test/CodeGen/RISCV/rvv/{vnmsub-rv32.ll => vnmsub.ll} (80%) delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll rename llvm/test/CodeGen/RISCV/rvv/{vor-rv64.ll => vor.ll} (83%) delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll rename llvm/test/CodeGen/RISCV/rvv/{vrem-rv64.ll => vrem.ll} (80%) delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll rename llvm/test/CodeGen/RISCV/rvv/{vremu-rv32.ll => vremu.ll} (80%) delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll rename llvm/test/CodeGen/RISCV/rvv/{vsbc-rv32.ll => vsbc.ll} (82%) delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll rename llvm/test/CodeGen/RISCV/rvv/{vselect-fp-rv32.ll => vselect-fp.ll} (99%) delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll rename llvm/test/CodeGen/RISCV/rvv/{vselect-int-rv64.ll => vselect-int.ll} (93%) delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll rename llvm/test/CodeGen/RISCV/rvv/{vxor-rv32.ll => vxor.ll} (83%) diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll deleted file mode 100644 index d62e490..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll +++ /dev/null @@ -1,1456 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vadc.nxv1i8.nxv1i8( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv1i8.nxv1i8( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv2i8.nxv2i8( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv2i8.nxv2i8( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv4i8.nxv4i8( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv4i8.nxv4i8( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv8i8.nxv8i8( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv8i8.nxv8i8( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv16i8.nxv16i8( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v10, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv16i8.nxv16i8( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv32i8.nxv32i8( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v12, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv32i8.nxv32i8( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv64i8.nxv64i8( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v16, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv64i8.nxv64i8( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv1i16.nxv1i16( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv1i16.nxv1i16( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv2i16.nxv2i16( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv2i16.nxv2i16( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv4i16.nxv4i16( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv4i16.nxv4i16( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv8i16.nxv8i16( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind 
{ -; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v10, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv8i16.nxv8i16( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv16i16.nxv16i16( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v12, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv16i16.nxv16i16( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv32i16.nxv32i16( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v16, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv32i16.nxv32i16( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv1i32.nxv1i32( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv1i32.nxv1i32( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv2i32.nxv2i32( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv2i32.nxv2i32( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv4i32.nxv4i32( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v10, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv4i32.nxv4i32( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv8i32.nxv8i32( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v12, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv8i32.nxv8i32( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv16i32.nxv16i32( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v16, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv16i32.nxv16i32( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare 
@llvm.riscv.vadc.nxv1i64.nxv1i64( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv1i64.nxv1i64( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv2i64.nxv2i64( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v10, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv2i64.nxv2i64( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv4i64.nxv4i64( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v12, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv4i64.nxv4i64( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv8i64.nxv8i64( - , - , - , - , - i32); - -define @intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vadc.vvm v8, v8, v16, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv8i64.nxv8i64( - undef, - %0, - %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv1i8.i8( - , - , - i8, - , - i32); - -define @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv1i8.i8( - undef, - %0, - i8 %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv2i8.i8( - , - , - i8, - , - i32); - -define @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv2i8.i8( - undef, - %0, - i8 %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv4i8.i8( - , - , - i8, - , - i32); - -define @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv4i8.i8( - undef, - %0, - i8 %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv8i8.i8( - , - , - i8, - , - i32); - -define @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv8i8.i8( - undef, - %0, - i8 %1, - %2, - i32 %3) - - ret 
%a -} - -declare @llvm.riscv.vadc.nxv16i8.i8( - , - , - i8, - , - i32); - -define @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv16i8.i8( - undef, - %0, - i8 %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv32i8.i8( - , - , - i8, - , - i32); - -define @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv32i8.i8( - undef, - %0, - i8 %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv64i8.i8( - , - , - i8, - , - i32); - -define @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv64i8.i8( - undef, - %0, - i8 %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv1i16.i16( - , - , - i16, - , - i32); - -define @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv1i16.i16( - undef, - %0, - i16 %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv2i16.i16( - , - , - i16, - , - i32); - -define @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv2i16.i16( - undef, - %0, - i16 %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv4i16.i16( - , - , - i16, - , - i32); - -define @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv4i16.i16( - undef, - %0, - i16 %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv8i16.i16( - , - , - i16, - , - i32); - -define @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv8i16.i16( - undef, - %0, - i16 %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv16i16.i16( - , - , - i16, - , - i32); - -define @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv16i16.i16( - undef, - %0, - 
i16 %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv32i16.i16( - , - , - i16, - , - i32); - -define @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv32i16.i16( - undef, - %0, - i16 %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv1i32.i32( - , - , - i32, - , - i32); - -define @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32( %0, i32 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv1i32.i32( - undef, - %0, - i32 %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv2i32.i32( - , - , - i32, - , - i32); - -define @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv2i32.i32( - undef, - %0, - i32 %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv4i32.i32( - , - , - i32, - , - i32); - -define @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv4i32.i32( - undef, - %0, - i32 %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv8i32.i32( - , - , - i32, - , - i32); - -define @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv8i32.i32( - undef, - %0, - i32 %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv16i32.i32( - , - , - i32, - , - i32); - -define @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv16i32.i32( - undef, - %0, - i32 %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv1i64.i64( - , - , - i64, - , - i32); - -define @intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv1i64.i64( - undef, - %0, - i64 %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv2i64.i64( - , - , - i64, - , - i32); - -define @intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64( %0, i64 %1, %2, i32 %3) nounwind { -; 
CHECK-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vadc.vvm v8, v8, v10, v0 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv2i64.i64( - undef, - %0, - i64 %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv4i64.i64( - , - , - i64, - , - i32); - -define @intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vadc.vvm v8, v8, v12, v0 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv4i64.i64( - undef, - %0, - i64 %1, - %2, - i32 %3) - - ret %a -} - -declare @llvm.riscv.vadc.nxv8i64.i64( - , - , - i64, - , - i32); - -define @intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vadc.vvm v8, v8, v16, v0 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv8i64.i64( - undef, - %0, - i64 %1, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, -9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv1i8.i8( - undef, - %0, - i8 -9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, 9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv2i8.i8( - undef, - %0, - i8 9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, -9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv4i8.i8( - undef, - %0, - i8 -9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, 9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv8i8.i8( - undef, - %0, - i8 9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, -9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv16i8.i8( - undef, - %0, - 
i8 -9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, 9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv32i8.i8( - undef, - %0, - i8 9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, -9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv64i8.i8( - undef, - %0, - i8 -9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, 9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv1i16.i16( - undef, - %0, - i16 9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, -9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv2i16.i16( - undef, - %0, - i16 -9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, 9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv4i16.i16( - undef, - %0, - i16 9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, -9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv8i16.i16( - undef, - %0, - i16 -9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, 9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv16i16.i16( - undef, - %0, - i16 9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, -9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv32i16.i16( - undef, - %0, - i16 -9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, 9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv1i32.i32( - undef, - %0, - i32 9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, -9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv2i32.i32( - undef, - %0, - i32 -9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, 9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv4i32.i32( - undef, - %0, - i32 9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, -9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv8i32.i32( - undef, - %0, - i32 -9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, 9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv16i32.i32( - undef, - %0, - i32 9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv1i64_nxv1i64_i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, 9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv1i64.i64( - undef, - %0, - i64 9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv2i64_nxv2i64_i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, -9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv2i64.i64( - undef, - %0, - i64 -9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv4i64_nxv4i64_i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, 9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv4i64.i64( - undef, - %0, - i64 9, - %1, - i32 %2) - - ret %a -} - -define @intrinsic_vadc_vim_nxv8i64_nxv8i64_i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vadc_vim_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, -9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vadc.nxv8i64.i64( - undef, - %0, - i64 -9, - %1, - i32 %2) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadc.ll similarity index 82% rename from llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll rename to llvm/test/CodeGen/RISCV/rvv/vadc.ll index bf6ce4e..629426a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadc.ll @@ -1,14 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v 
\ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vadc.nxv1i8.nxv1i8( , , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -20,7 +22,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -30,9 +32,9 @@ declare @llvm.riscv.vadc.nxv2i8.nxv2i8( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -44,7 +46,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -54,9 +56,9 @@ declare @llvm.riscv.vadc.nxv4i8.nxv4i8( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -68,7 +70,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -78,9 +80,9 @@ declare @llvm.riscv.vadc.nxv8i8.nxv8i8( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -92,7 +94,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -102,9 +104,9 @@ declare @llvm.riscv.vadc.nxv16i8.nxv16i8( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -116,7 +118,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -126,9 +128,9 @@ declare @llvm.riscv.vadc.nxv32i8.nxv32i8( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -140,7 +142,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -150,9 +152,9 @@ declare @llvm.riscv.vadc.nxv64i8.nxv64i8( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -164,7 +166,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -174,9 +176,9 @@ declare @llvm.riscv.vadc.nxv1i16.nxv1i16( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -188,7 +190,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -198,9 +200,9 @@ declare @llvm.riscv.vadc.nxv2i16.nxv2i16( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -212,7 +214,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -222,9 +224,9 @@ declare @llvm.riscv.vadc.nxv4i16.nxv4i16( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -236,7 +238,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -246,9 +248,9 @@ declare @llvm.riscv.vadc.nxv8i16.nxv8i16( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -260,7 +262,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -270,9 +272,9 @@ declare @llvm.riscv.vadc.nxv16i16.nxv16i16( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -284,7 +286,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -294,9 +296,9 @@ declare @llvm.riscv.vadc.nxv32i16.nxv32i16( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -308,7 +310,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -318,9 +320,9 @@ declare @llvm.riscv.vadc.nxv1i32.nxv1i32( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -332,7 +334,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -342,9 +344,9 @@ declare @llvm.riscv.vadc.nxv2i32.nxv2i32( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -356,7 +358,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -366,9 +368,9 @@ declare @llvm.riscv.vadc.nxv4i32.nxv4i32( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { +define 
@intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -380,7 +382,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -390,9 +392,9 @@ declare @llvm.riscv.vadc.nxv8i32.nxv8i32( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -404,7 +406,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -414,9 +416,9 @@ declare @llvm.riscv.vadc.nxv16i32.nxv16i32( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -428,7 +430,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -438,9 +440,9 @@ declare @llvm.riscv.vadc.nxv1i64.nxv1i64( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -452,7 +454,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -462,9 +464,9 @@ declare @llvm.riscv.vadc.nxv2i64.nxv2i64( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -476,7 +478,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -486,9 +488,9 @@ declare @llvm.riscv.vadc.nxv4i64.nxv4i64( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -500,7 +502,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -510,9 +512,9 @@ declare @llvm.riscv.vadc.nxv8i64.nxv8i64( , , , - i64); + iXLen); -define @intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vadc.nxv1i8.i8( , i8, , - i64); + iXLen); -define @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -548,7 +550,7 @@ entry: %0, i8 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -558,9 +560,9 @@ declare @llvm.riscv.vadc.nxv2i8.i8( , 
i8, , - i64); + iXLen); -define @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -572,7 +574,7 @@ entry: %0, i8 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -582,9 +584,9 @@ declare @llvm.riscv.vadc.nxv4i8.i8( , i8, , - i64); + iXLen); -define @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -596,7 +598,7 @@ entry: %0, i8 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -606,9 +608,9 @@ declare @llvm.riscv.vadc.nxv8i8.i8( , i8, , - i64); + iXLen); -define @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -620,7 +622,7 @@ entry: %0, i8 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -630,9 +632,9 @@ declare @llvm.riscv.vadc.nxv16i8.i8( , i8, , - i64); + iXLen); -define @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -644,7 +646,7 @@ entry: %0, i8 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -654,9 +656,9 @@ declare @llvm.riscv.vadc.nxv32i8.i8( , i8, , - i64); + iXLen); -define @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -668,7 +670,7 @@ entry: %0, i8 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -678,9 +680,9 @@ declare @llvm.riscv.vadc.nxv64i8.i8( , i8, , - i64); + iXLen); -define @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma @@ -692,7 +694,7 @@ entry: %0, i8 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -702,9 +704,9 @@ declare @llvm.riscv.vadc.nxv1i16.i16( , i16, , - i64); + iXLen); -define @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -716,7 +718,7 @@ entry: %0, i16 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -726,9 +728,9 @@ declare @llvm.riscv.vadc.nxv2i16.i16( , i16, , - i64); + iXLen); -define @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -740,7 +742,7 @@ entry: %0, i16 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -750,9 
+752,9 @@ declare @llvm.riscv.vadc.nxv4i16.i16( , i16, , - i64); + iXLen); -define @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -764,7 +766,7 @@ entry: %0, i16 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -774,9 +776,9 @@ declare @llvm.riscv.vadc.nxv8i16.i16( , i16, , - i64); + iXLen); -define @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -788,7 +790,7 @@ entry: %0, i16 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -798,9 +800,9 @@ declare @llvm.riscv.vadc.nxv16i16.i16( , i16, , - i64); + iXLen); -define @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -812,7 +814,7 @@ entry: %0, i16 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -822,9 +824,9 @@ declare @llvm.riscv.vadc.nxv32i16.i16( , i16, , - i64); + iXLen); -define @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma @@ -836,7 +838,7 @@ entry: %0, i16 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -846,9 +848,9 @@ declare @llvm.riscv.vadc.nxv1i32.i32( , i32, , - i64); + iXLen); -define @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32( %0, i32 %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -860,7 +862,7 @@ entry: %0, i32 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -870,9 +872,9 @@ declare @llvm.riscv.vadc.nxv2i32.i32( , i32, , - i64); + iXLen); -define @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -884,7 +886,7 @@ entry: %0, i32 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -894,9 +896,9 @@ declare @llvm.riscv.vadc.nxv4i32.i32( , i32, , - i64); + iXLen); -define @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -908,7 +910,7 @@ entry: %0, i32 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -918,9 +920,9 @@ declare @llvm.riscv.vadc.nxv8i32.i32( , i32, , - i64); + iXLen); -define @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry 
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -932,7 +934,7 @@ entry: %0, i32 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -942,9 +944,9 @@ declare @llvm.riscv.vadc.nxv16i32.i32( , i32, , - i64); + iXLen); -define @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, i64 %3) nounwind { +define @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma @@ -956,7 +958,7 @@ entry: %0, i32 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -966,21 +968,33 @@ declare @llvm.riscv.vadc.nxv1i64.i64( , i64, , - i64); - -define @intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vadc.vvm v8, v8, v9, v0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vadc.vxm v8, v8, a0, v0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv1i64.i64( undef, %0, i64 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -990,21 +1004,33 @@ declare @llvm.riscv.vadc.nxv2i64.i64( , i64, , - i64); - -define @intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vadc.vvm v8, v8, v10, v0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vadc.vxm v8, v8, a0, v0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv2i64.i64( undef, %0, i64 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -1014,21 +1040,33 @@ declare @llvm.riscv.vadc.nxv4i64.i64( , i64, , - i64); - -define @intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; 
RV32-NEXT: vadc.vvm v8, v8, v12, v0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vadc.vxm v8, v8, a0, v0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv4i64.i64( undef, %0, i64 %1, %2, - i64 %3) + iXLen %3) ret %a } @@ -1038,247 +1076,259 @@ declare @llvm.riscv.vadc.nxv8i64.i64( , i64, , - i64); - -define @intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vadc.vvm v8, v8, v16, v0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vadc.vxm v8, v8, a0, v0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv8i64.i64( undef, %0, i64 %1, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, 9, v0 +; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv1i8.i8( undef, %0, - i8 9, + i8 -9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, -9, v0 +; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv2i8.i8( undef, %0, - i8 -9, + i8 9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, 9, v0 +; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv4i8.i8( undef, %0, - i8 9, + i8 -9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, -9, v0 +; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv8i8.i8( undef, %0, - i8 -9, + i8 9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8( %0, %1, iXLen %2) nounwind { 
; CHECK-LABEL: intrinsic_vadc_vim_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, 9, v0 +; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv16i8.i8( undef, %0, - i8 9, + i8 -9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, -9, v0 +; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv32i8.i8( undef, %0, - i8 -9, + i8 9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, 9, v0 +; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv64i8.i8( undef, %0, - i8 9, + i8 -9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, -9, v0 +; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv1i16.i16( undef, %0, - i16 -9, + i16 9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, 9, v0 +; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv2i16.i16( undef, %0, - i16 9, + i16 -9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, -9, v0 +; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv4i16.i16( undef, %0, - i16 -9, + i16 9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, 9, v0 +; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv8i16.i16( undef, %0, - i16 9, + i16 -9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, 
ma -; CHECK-NEXT: vadc.vim v8, v8, -9, v0 +; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv16i16.i16( undef, %0, - i16 -9, + i16 9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vadc.vim v8, v8, 9, v0 +; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv32i16.i16( undef, %0, - i16 9, + i16 -9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -1290,12 +1340,12 @@ entry: %0, i32 -9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -1307,12 +1357,12 @@ entry: %0, i32 9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -1324,12 +1374,12 @@ entry: %0, i32 -9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -1341,12 +1391,12 @@ entry: %0, i32 9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -1358,12 +1408,12 @@ entry: %0, i32 -9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv1i64_nxv1i64_i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv1i64_nxv1i64_i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -1375,12 +1425,12 @@ entry: %0, i64 9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv2i64_nxv2i64_i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv2i64_nxv2i64_i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -1392,12 +1442,12 @@ entry: %0, i64 -9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv4i64_nxv4i64_i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv4i64_nxv4i64_i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -1409,12 +1459,12 @@ 
entry: %0, i64 9, %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vadc_vim_nxv8i64_nxv8i64_i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vadc_vim_nxv8i64_nxv8i64_i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -1426,7 +1476,7 @@ entry: %0, i64 -9, %1, - i64 %2) + iXLen %2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll deleted file mode 100644 index c208867..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll +++ /dev/null @@ -1,2800 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vand.nxv1i8.nxv1i8( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv1i8.nxv1i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv1i8.nxv1i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vand.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv2i8.nxv2i8( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv2i8.nxv2i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv2i8.nxv2i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vand.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv4i8.nxv4i8( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv4i8.nxv4i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv4i8.nxv4i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vand.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare 
@llvm.riscv.vand.nxv8i8.nxv8i8( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv8i8.nxv8i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv8i8.nxv8i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vand.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv16i8.nxv16i8( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv16i8.nxv16i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv16i8.nxv16i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vand.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv32i8.nxv32i8( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv32i8.nxv32i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv32i8.nxv32i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vand.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv64i8.nxv64i8( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv64i8.nxv64i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv64i8.nxv64i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vand.vv v8, v16, 
v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv1i16.nxv1i16( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv1i16.nxv1i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv1i16.nxv1i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vand.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv2i16.nxv2i16( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv2i16.nxv2i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv2i16.nxv2i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vand.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv4i16.nxv4i16( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv4i16.nxv4i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv4i16.nxv4i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vand.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv8i16.nxv8i16( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv8i16.nxv8i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv8i16.nxv8i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vand.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv16i16.nxv16i16( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv16i16.nxv16i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv16i16.nxv16i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vand.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv32i16.nxv32i16( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv32i16.nxv32i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv32i16.nxv32i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vand.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv1i32.nxv1i32( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv1i32.nxv1i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv1i32.nxv1i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vand.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv2i32.nxv2i32( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv2i32.nxv2i32( - undef, - 
%0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv2i32.nxv2i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vand.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv4i32.nxv4i32( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv4i32.nxv4i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv4i32.nxv4i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vand.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv8i32.nxv8i32( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv8i32.nxv8i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv8i32.nxv8i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vand.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv16i32.nxv16i32( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv16i32.nxv16i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv16i32.nxv16i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vand.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv1i64.nxv1i64( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv1i64.nxv1i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv1i64.nxv1i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vand.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv2i64.nxv2i64( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv2i64.nxv2i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv2i64.nxv2i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vand.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv4i64.nxv4i64( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv4i64.nxv4i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv4i64.nxv4i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vand.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv8i64.nxv8i64( - , - , - , - i64); - -define @intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vand.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv8i64.nxv8i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv8i64.nxv8i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vand.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64( - %0, - %1, - %2, - %3, - i64 %4, 
i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv1i8.i8( - , - , - i8, - i64); - -define @intrinsic_vand_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv1i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv1i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vand.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv2i8.i8( - , - , - i8, - i64); - -define @intrinsic_vand_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv2i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv2i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vand.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv4i8.i8( - , - , - i8, - i64); - -define @intrinsic_vand_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv4i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv4i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vand.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv8i8.i8( - , - , - i8, - i64); - -define @intrinsic_vand_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv8i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv8i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vand.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - 
- ret %a -} - -declare @llvm.riscv.vand.nxv16i8.i8( - , - , - i8, - i64); - -define @intrinsic_vand_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv16i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv16i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vand.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv32i8.i8( - , - , - i8, - i64); - -define @intrinsic_vand_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv32i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv32i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vand.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv64i8.i8( - , - , - i8, - i64); - -define @intrinsic_vand_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv64i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv64i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vand.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv64i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv1i16.i16( - , - , - i16, - i64); - -define @intrinsic_vand_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv1i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv1i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vand.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vand.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv2i16.i16( - , - , - i16, - i64); - -define @intrinsic_vand_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv2i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv2i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vand.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv4i16.i16( - , - , - i16, - i64); - -define @intrinsic_vand_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv4i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv4i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vand.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv8i16.i16( - , - , - i16, - i64); - -define @intrinsic_vand_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv8i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv8i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vand.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv16i16.i16( - , - , - i16, - i64); - -define @intrinsic_vand_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv16i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv16i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vand.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv32i16.i16( - , - , - i16, - i64); - -define @intrinsic_vand_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv32i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv32i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vand.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv32i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv1i32.i32( - , - , - i32, - i64); - -define @intrinsic_vand_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv1i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv1i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vand.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv2i32.i32( - , - , - i32, - i64); - -define @intrinsic_vand_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv2i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv2i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vand.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv4i32.i32( - , - , - i32, - i64); - -define @intrinsic_vand_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv4i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv4i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) 
nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vand.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv8i32.i32( - , - , - i32, - i64); - -define @intrinsic_vand_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv8i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv8i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vand.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv16i32.i32( - , - , - i32, - i64); - -define @intrinsic_vand_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv16i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv16i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vand.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv16i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv1i64.i64( - , - , - i64, - i64); - -define @intrinsic_vand_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv1i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv1i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vand.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv2i64.i64( - , - , - i64, - i64); - -define @intrinsic_vand_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv2i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv2i64.i64( - , 
- , - i64, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vand.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv4i64.i64( - , - , - i64, - i64); - -define @intrinsic_vand_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv4i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv4i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vand.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vand.nxv8i64.i64( - , - , - i64, - i64); - -define @intrinsic_vand_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv8i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vand.mask.nxv8i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vand.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv8i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv1i8.i8( - undef, - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vand.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv1i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv2i8.i8( - undef, - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, 
mu -; CHECK-NEXT: vand.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv2i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv4i8.i8( - undef, - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vand.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv4i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv8i8.i8( - undef, - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vand.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv8i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv16i8.i8( - undef, - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vand.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv16i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv32i8.i8( - undef, - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vand.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv32i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv64i8.i8( - undef, - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { -; 
CHECK-LABEL: intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vand.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv64i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv1i16.i16( - undef, - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vand.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv1i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv2i16.i16( - undef, - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vand.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv2i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv4i16.i16( - undef, - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vand.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv4i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv8i16.i16( - undef, - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vand.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv8i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv16i16.i16( - undef, - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vand.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv16i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv32i16.i16( - undef, - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vand.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv32i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv1i32.i32( - undef, - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vand.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv1i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv2i32.i32( - undef, - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vand.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv2i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv4i32.i32( - undef, - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vand.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv4i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define 
@intrinsic_vand_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv8i32.i32( - undef, - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vand.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv8i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv16i32.i32( - undef, - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vand.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv16i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv1i64.i64( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vand.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv1i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv2i64.i64( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vand.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv2i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv4i64.i64( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vand.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv4i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vand_vi_nxv8i64_nxv8i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vand_vi_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vand.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.nxv8i64.i64( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vand.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vand.mask.nxv8i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vand.ll similarity index 83% rename from llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vand.ll index f09efe0..8bbb009 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vand.nxv1i8.nxv1i8( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -18,7 +20,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -28,10 +30,10 @@ declare @llvm.riscv.vand.mask.nxv1i8.nxv1i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -43,7 +45,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -52,9 +54,9 @@ declare @llvm.riscv.vand.nxv2i8.nxv2i8( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -65,7 +67,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -75,10 +77,10 @@ declare @llvm.riscv.vand.mask.nxv2i8.nxv2i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -90,7 +92,7 @@ entry: 
%1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -99,9 +101,9 @@ declare @llvm.riscv.vand.nxv4i8.nxv4i8( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -112,7 +114,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -122,10 +124,10 @@ declare @llvm.riscv.vand.mask.nxv4i8.nxv4i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -137,7 +139,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -146,9 +148,9 @@ declare @llvm.riscv.vand.nxv8i8.nxv8i8( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -159,7 +161,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -169,10 +171,10 @@ declare @llvm.riscv.vand.mask.nxv8i8.nxv8i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -184,7 +186,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -193,9 +195,9 @@ declare @llvm.riscv.vand.nxv16i8.nxv16i8( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -206,7 +208,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -216,10 +218,10 @@ declare @llvm.riscv.vand.mask.nxv16i8.nxv16i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -231,7 +233,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -240,9 +242,9 @@ declare @llvm.riscv.vand.nxv32i8.nxv32i8( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -253,7 +255,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -263,10 +265,10 @@ declare @llvm.riscv.vand.mask.nxv32i8.nxv32i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, 
%3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -278,7 +280,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -287,9 +289,9 @@ declare @llvm.riscv.vand.nxv64i8.nxv64i8( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -300,7 +302,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vand.mask.nxv64i8.nxv64i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) @@ -326,7 +328,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -335,9 +337,9 @@ declare @llvm.riscv.vand.nxv1i16.nxv1i16( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -348,7 +350,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -358,10 +360,10 @@ declare @llvm.riscv.vand.mask.nxv1i16.nxv1i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -373,7 +375,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -382,9 +384,9 @@ declare @llvm.riscv.vand.nxv2i16.nxv2i16( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -395,7 +397,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -405,10 +407,10 @@ declare @llvm.riscv.vand.mask.nxv2i16.nxv2i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -420,7 +422,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -429,9 +431,9 @@ declare @llvm.riscv.vand.nxv4i16.nxv4i16( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -442,7 +444,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -452,10 +454,10 @@ declare @llvm.riscv.vand.mask.nxv4i16.nxv4i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -467,7 +469,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -476,9 +478,9 @@ declare @llvm.riscv.vand.nxv8i16.nxv8i16( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -489,7 +491,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -499,10 +501,10 @@ declare @llvm.riscv.vand.mask.nxv8i16.nxv8i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -514,7 +516,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -523,9 +525,9 @@ declare @llvm.riscv.vand.nxv16i16.nxv16i16( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -536,7 +538,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -546,10 +548,10 @@ declare @llvm.riscv.vand.mask.nxv16i16.nxv16i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -561,7 +563,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -570,9 +572,9 @@ declare @llvm.riscv.vand.nxv32i16.nxv32i16( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -583,7 +585,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -593,10 +595,10 @@ declare @llvm.riscv.vand.mask.nxv32i16.nxv32i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -609,7 +611,7 @@ entry: %1, %2, %3, - 
i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -618,9 +620,9 @@ declare @llvm.riscv.vand.nxv1i32.nxv1i32( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -631,7 +633,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -641,10 +643,10 @@ declare @llvm.riscv.vand.mask.nxv1i32.nxv1i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -656,7 +658,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -665,9 +667,9 @@ declare @llvm.riscv.vand.nxv2i32.nxv2i32( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -678,7 +680,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -688,10 +690,10 @@ declare @llvm.riscv.vand.mask.nxv2i32.nxv2i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -703,7 +705,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -712,9 +714,9 @@ declare @llvm.riscv.vand.nxv4i32.nxv4i32( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -725,7 +727,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -735,10 +737,10 @@ declare @llvm.riscv.vand.mask.nxv4i32.nxv4i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -750,7 +752,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -759,9 +761,9 @@ declare @llvm.riscv.vand.nxv8i32.nxv8i32( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -772,7 +774,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -782,10 +784,10 @@ declare @llvm.riscv.vand.mask.nxv8i32.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -797,7 +799,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vand.nxv16i32.nxv16i32( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -819,7 +821,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -829,10 +831,10 @@ declare @llvm.riscv.vand.mask.nxv16i32.nxv16i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -845,7 +847,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -854,9 +856,9 @@ declare @llvm.riscv.vand.nxv1i64.nxv1i64( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -867,7 +869,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -877,10 +879,10 @@ declare @llvm.riscv.vand.mask.nxv1i64.nxv1i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -892,7 +894,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -901,9 +903,9 @@ declare @llvm.riscv.vand.nxv2i64.nxv2i64( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -914,7 +916,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -924,10 +926,10 @@ declare @llvm.riscv.vand.mask.nxv2i64.nxv2i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -939,7 +941,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vand.nxv4i64.nxv4i64( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -961,7 +963,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -971,10 +973,10 @@ declare @llvm.riscv.vand.mask.nxv4i64.nxv4i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -986,7 +988,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -995,9 +997,9 @@ declare @llvm.riscv.vand.nxv8i64.nxv8i64( , , , - i32); + iXLen); -define @intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -1008,7 +1010,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -1018,10 +1020,10 @@ declare @llvm.riscv.vand.mask.nxv8i64.nxv8i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -1034,7 +1036,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1043,9 +1045,9 @@ declare @llvm.riscv.vand.nxv1i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vand_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vand_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -1056,7 +1058,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1066,10 +1068,10 @@ declare @llvm.riscv.vand.mask.nxv1i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu @@ -1081,7 +1083,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1090,9 +1092,9 @@ declare @llvm.riscv.vand.nxv2i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vand_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vand_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -1103,7 +1105,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1113,10 +1115,10 @@ declare @llvm.riscv.vand.mask.nxv2i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu @@ -1128,7 +1130,7 @@ entry: %1, i8 %2, %3, - 
i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1137,9 +1139,9 @@ declare @llvm.riscv.vand.nxv4i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vand_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vand_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1150,7 +1152,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1160,10 +1162,10 @@ declare @llvm.riscv.vand.mask.nxv4i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu @@ -1175,7 +1177,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1184,9 +1186,9 @@ declare @llvm.riscv.vand.nxv8i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vand_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vand_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -1197,7 +1199,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1207,10 +1209,10 @@ declare @llvm.riscv.vand.mask.nxv8i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu @@ -1222,7 +1224,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1231,9 +1233,9 @@ declare @llvm.riscv.vand.nxv16i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vand_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vand_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1244,7 +1246,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1254,10 +1256,10 @@ declare @llvm.riscv.vand.mask.nxv16i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu @@ -1269,7 +1271,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1278,9 +1280,9 @@ declare @llvm.riscv.vand.nxv32i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vand_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vand_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -1291,7 +1293,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1301,10 +1303,10 @@ declare @llvm.riscv.vand.mask.nxv32i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define 
@intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu @@ -1316,7 +1318,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1325,9 +1327,9 @@ declare @llvm.riscv.vand.nxv64i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vand_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vand_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma @@ -1338,7 +1340,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1348,10 +1350,10 @@ declare @llvm.riscv.vand.mask.nxv64i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu @@ -1363,7 +1365,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1372,9 +1374,9 @@ declare @llvm.riscv.vand.nxv1i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vand_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vand_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -1385,7 +1387,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1395,10 +1397,10 @@ declare @llvm.riscv.vand.mask.nxv1i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu @@ -1410,7 +1412,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1419,9 +1421,9 @@ declare @llvm.riscv.vand.nxv2i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vand_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vand_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -1432,7 +1434,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1442,10 +1444,10 @@ declare @llvm.riscv.vand.mask.nxv2i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu @@ -1457,7 +1459,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1466,9 +1468,9 @@ declare @llvm.riscv.vand.nxv4i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vand_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vand_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1479,7 +1481,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1489,10 +1491,10 @@ declare @llvm.riscv.vand.mask.nxv4i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -1504,7 +1506,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1513,9 +1515,9 @@ declare @llvm.riscv.vand.nxv8i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vand_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vand_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1526,7 +1528,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1536,10 +1538,10 @@ declare @llvm.riscv.vand.mask.nxv8i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -1551,7 +1553,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1560,9 +1562,9 @@ declare @llvm.riscv.vand.nxv16i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vand_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vand_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -1573,7 +1575,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1583,10 +1585,10 @@ declare @llvm.riscv.vand.mask.nxv16i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu @@ -1598,7 +1600,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1607,9 +1609,9 @@ declare @llvm.riscv.vand.nxv32i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vand_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vand_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma @@ -1620,7 +1622,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1630,10 +1632,10 @@ declare @llvm.riscv.vand.mask.nxv32i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu @@ -1645,7 +1647,7 @@ entry: %1, i16 %2, 
%3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1654,9 +1656,9 @@ declare @llvm.riscv.vand.nxv1i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vand_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vand_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1667,7 +1669,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1677,10 +1679,10 @@ declare @llvm.riscv.vand.mask.nxv1i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu @@ -1692,7 +1694,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1701,9 +1703,9 @@ declare @llvm.riscv.vand.nxv2i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vand_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vand_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1714,7 +1716,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1724,10 +1726,10 @@ declare @llvm.riscv.vand.mask.nxv2i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu @@ -1739,7 +1741,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1748,9 +1750,9 @@ declare @llvm.riscv.vand.nxv4i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vand_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vand_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1761,7 +1763,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1771,10 +1773,10 @@ declare @llvm.riscv.vand.mask.nxv4i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1786,7 +1788,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1795,9 +1797,9 @@ declare @llvm.riscv.vand.nxv8i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vand_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vand_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1808,7 +1810,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1818,10 +1820,10 @@ declare @llvm.riscv.vand.mask.nxv8i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); 
-define @intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1833,7 +1835,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1842,9 +1844,9 @@ declare @llvm.riscv.vand.nxv16i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vand_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vand_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma @@ -1855,7 +1857,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1865,10 +1867,10 @@ declare @llvm.riscv.vand.mask.nxv16i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu @@ -1880,7 +1882,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1889,26 +1891,32 @@ declare @llvm.riscv.vand.nxv1i64.i64( , , i64, - i32); - -define @intrinsic_vand_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vand.vv v8, v8, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vand_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vand_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vand_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vand.nxv1i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1918,28 +1926,34 @@ declare @llvm.riscv.vand.mask.nxv1i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vand.vv v8, v9, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, 
sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vand.vv v8, v9, v10, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vand.vx v8, v9, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vand.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1948,26 +1962,32 @@ declare @llvm.riscv.vand.nxv2i64.i64( , , i64, - i32); - -define @intrinsic_vand_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vand.vv v8, v8, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vand_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vand_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vand_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vand.nxv2i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1977,28 +1997,34 @@ declare @llvm.riscv.vand.mask.nxv2i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vand.vv v8, v10, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vand.vv v8, v10, v12, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vand.vx v8, v10, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vand.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -2007,26 +2033,32 @@ declare @llvm.riscv.vand.nxv4i64.i64( , , i64, - i32); - -define @intrinsic_vand_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, 
ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vand.vv v8, v8, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vand_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vand_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vand_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vand.nxv4i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -2036,28 +2068,34 @@ declare @llvm.riscv.vand.mask.nxv4i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vand.vv v8, v12, v16, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vand.vv v8, v12, v16, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vand.vx v8, v12, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vand.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -2066,26 +2104,32 @@ declare @llvm.riscv.vand.nxv8i64.i64( , , i64, - i32); - -define @intrinsic_vand_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vand_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vand.vv v8, v8, v16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vand_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vand_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vand_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vand.nxv8i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -2095,33 +2139,39 @@ declare 
@llvm.riscv.vand.mask.nxv8i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vand.vv v8, v16, v24, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vand.vv v8, v16, v24, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vand.vx v8, v16, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vand.mask.nxv8i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv1i8_nxv1i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -2132,12 +2182,12 @@ entry: undef, %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -2149,12 +2199,12 @@ entry: %1, i8 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv2i8_nxv2i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -2165,12 +2215,12 @@ entry: undef, %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -2182,12 +2232,12 @@ entry: %1, i8 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv4i8_nxv4i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv4i8_nxv4i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -2198,12 +2248,12 @@ entry: undef, %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -2215,12 +2265,12 @@ entry: %1, i8 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define 
@intrinsic_vand_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv8i8_nxv8i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -2231,12 +2281,12 @@ entry: undef, %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -2248,12 +2298,12 @@ entry: %1, i8 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv16i8_nxv16i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -2264,12 +2314,12 @@ entry: undef, %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -2281,12 +2331,12 @@ entry: %1, i8 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv32i8_nxv32i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -2297,12 +2347,12 @@ entry: undef, %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -2314,12 +2364,12 @@ entry: %1, i8 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv64i8_nxv64i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -2330,12 +2380,12 @@ entry: undef, %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu @@ -2347,12 +2397,12 @@ entry: %1, i8 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv1i16_nxv1i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -2363,12 +2413,12 @@ entry: undef, %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -2380,12 +2430,12 @@ entry: %1, i16 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv2i16_nxv2i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -2396,12 +2446,12 @@ entry: undef, %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -2413,12 +2463,12 @@ entry: %1, i16 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv4i16_nxv4i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -2429,12 +2479,12 @@ entry: undef, %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -2446,12 +2496,12 @@ entry: %1, i16 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv8i16_nxv8i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -2462,12 +2512,12 @@ entry: undef, %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -2479,12 +2529,12 @@ entry: %1, i16 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv16i16_nxv16i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -2495,12 +2545,12 @@ entry: undef, %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -2512,12 +2562,12 @@ entry: %1, i16 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv32i16_nxv32i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv32i16_nxv32i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -2528,12 +2578,12 @@ entry: undef, %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define 
@intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -2545,12 +2595,12 @@ entry: %1, i16 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv1i32_nxv1i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -2561,12 +2611,12 @@ entry: undef, %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -2578,12 +2628,12 @@ entry: %1, i32 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv2i32_nxv2i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -2594,12 +2644,12 @@ entry: undef, %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -2611,12 +2661,12 @@ entry: %1, i32 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv4i32_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -2627,12 +2677,12 @@ entry: undef, %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -2644,12 +2694,12 @@ entry: %1, i32 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv8i32_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -2660,12 +2710,12 @@ entry: undef, %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -2677,12 +2727,12 @@ entry: %1, i32 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv16i32_nxv16i32_i32( %0, iXLen %1) nounwind { ; 
CHECK-LABEL: intrinsic_vand_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -2693,12 +2743,12 @@ entry: undef, %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -2710,12 +2760,12 @@ entry: %1, i32 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv1i64_nxv1i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv1i64_nxv1i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -2726,12 +2776,12 @@ entry: undef, %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -2743,12 +2793,12 @@ entry: %1, i64 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv2i64_nxv2i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv2i64_nxv2i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -2759,12 +2809,12 @@ entry: undef, %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -2776,12 +2826,12 @@ entry: %1, i64 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv4i64_nxv4i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv4i64_nxv4i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -2792,12 +2842,12 @@ entry: undef, %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -2809,12 +2859,12 @@ entry: %1, i64 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vand_vi_nxv8i64_nxv8i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vand_vi_nxv8i64_nxv8i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -2825,12 +2875,12 @@ entry: undef, %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -2842,7 +2892,7 @@ entry: %1, 
i64 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll deleted file mode 100644 index 12dd21c..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll +++ /dev/null @@ -1,2074 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vdiv.nxv1i8.nxv1i8( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv1i8.nxv1i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv2i8.nxv2i8( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv2i8.nxv2i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv4i8.nxv4i8( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv4i8.nxv4i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv8i8.nxv8i8( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv8i8.nxv8i8( - undef, - 
%0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv16i8.nxv16i8( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv16i8.nxv16i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv32i8.nxv32i8( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv32i8.nxv32i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv64i8.nxv64i8( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv64i8.nxv64i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv1i16.nxv1i16( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv1i16.nxv1i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv2i16.nxv2i16( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv2i16.nxv2i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv4i16.nxv4i16( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv4i16.nxv4i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv8i16.nxv8i16( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv8i16.nxv8i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv16i16.nxv16i16( - , - , - , - i64); - -define 
@intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv16i16.nxv16i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv32i16.nxv32i16( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv32i16.nxv32i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv1i32.nxv1i32( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv1i32.nxv1i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv2i32.nxv2i32( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv2i32.nxv2i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vdiv.vv v8, 
v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv4i32.nxv4i32( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv4i32.nxv4i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv8i32.nxv8i32( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv8i32.nxv8i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv16i32.nxv16i32( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv16i32.nxv16i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv1i64.nxv1i64( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv1i64.nxv1i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64( 
%0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv2i64.nxv2i64( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv2i64.nxv2i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv4i64.nxv4i64( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv4i64.nxv4i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv8i64.nxv8i64( - , - , - , - i64); - -define @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vdiv.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv8i64.nxv8i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv1i8.i8( - , - , - i8, - i64); - -define @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv1i8.i8( - undef, - %0, - i8 %1, 
- i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv1i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv2i8.i8( - , - , - i8, - i64); - -define @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv2i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv2i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv4i8.i8( - , - , - i8, - i64); - -define @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv4i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv4i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv8i8.i8( - , - , - i8, - i64); - -define @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv8i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv8i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv16i8.i8( - , - , - i8, - i64); - -define @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv16i8.i8( - undef, - %0, - i8 %1, - 
i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv16i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vdiv.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv32i8.i8( - , - , - i8, - i64); - -define @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv32i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv32i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vdiv.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv64i8.i8( - , - , - i8, - i64); - -define @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv64i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv64i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vdiv.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv64i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv1i16.i16( - , - , - i16, - i64); - -define @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv1i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv1i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv2i16.i16( - , - , - i16, - i64); - -define @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a 
= call @llvm.riscv.vdiv.nxv2i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv2i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv4i16.i16( - , - , - i16, - i64); - -define @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv4i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv4i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv8i16.i16( - , - , - i16, - i64); - -define @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv8i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv8i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vdiv.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv16i16.i16( - , - , - i16, - i64); - -define @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv16i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv16i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vdiv.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv32i16.i16( - , - , - i16, - i64); - -define @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv32i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv32i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vdiv.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv32i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv1i32.i32( - , - , - i32, - i64); - -define @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv1i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv1i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv2i32.i32( - , - , - i32, - i64); - -define @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv2i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv2i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv4i32.i32( - , - , - i32, - i64); - -define @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv4i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv4i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vdiv.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv8i32.i32( - , - , - i32, - i64); - -define 
@intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv8i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv8i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vdiv.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv16i32.i32( - , - , - i32, - i64); - -define @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv16i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv16i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vdiv.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv16i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv1i64.i64( - , - , - i64, - i64); - -define @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv1i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv1i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv2i64.i64( - , - , - i64, - i64); - -define @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv2i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv2i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vdiv.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv2i64.i64( - %0, - %1, - i64 
%2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv4i64.i64( - , - , - i64, - i64); - -define @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv4i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv4i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vdiv.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdiv.nxv8i64.i64( - , - , - i64, - i64); - -define @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vdiv.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.nxv8i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdiv.mask.nxv8i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vdiv.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdiv.mask.nxv8i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv.ll similarity index 80% rename from llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vdiv.ll index d173346..989ceca 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vdiv.nxv1i8.nxv1i8( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -18,7 +20,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -28,10 +30,10 @@ declare @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -43,7 +45,7 @@ entry: %1, %2, %3, - i32 %4, i32 
1) + iXLen %4, iXLen 1) ret %a } @@ -52,9 +54,9 @@ declare @llvm.riscv.vdiv.nxv2i8.nxv2i8( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -65,7 +67,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -75,10 +77,10 @@ declare @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -90,7 +92,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -99,9 +101,9 @@ declare @llvm.riscv.vdiv.nxv4i8.nxv4i8( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -112,7 +114,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -122,10 +124,10 @@ declare @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -137,7 +139,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -146,9 +148,9 @@ declare @llvm.riscv.vdiv.nxv8i8.nxv8i8( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -159,7 +161,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -169,10 +171,10 @@ declare @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -184,7 +186,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -193,9 +195,9 @@ declare @llvm.riscv.vdiv.nxv16i8.nxv16i8( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -206,7 +208,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -216,10 +218,10 @@ declare @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +define 
@intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -231,7 +233,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -240,9 +242,9 @@ declare @llvm.riscv.vdiv.nxv32i8.nxv32i8( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -253,7 +255,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -263,10 +265,10 @@ declare @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -278,7 +280,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -287,9 +289,9 @@ declare @llvm.riscv.vdiv.nxv64i8.nxv64i8( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -300,7 +302,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) @@ -326,7 +328,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -335,9 +337,9 @@ declare @llvm.riscv.vdiv.nxv1i16.nxv1i16( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -348,7 +350,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -358,10 +360,10 @@ declare @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -373,7 +375,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -382,9 +384,9 @@ declare @llvm.riscv.vdiv.nxv2i16.nxv2i16( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e16, mf2, ta, ma @@ -395,7 +397,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -405,10 +407,10 @@ declare @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -420,7 +422,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -429,9 +431,9 @@ declare @llvm.riscv.vdiv.nxv4i16.nxv4i16( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -442,7 +444,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -452,10 +454,10 @@ declare @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -467,7 +469,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -476,9 +478,9 @@ declare @llvm.riscv.vdiv.nxv8i16.nxv8i16( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -489,7 +491,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -499,10 +501,10 @@ declare @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -514,7 +516,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -523,9 +525,9 @@ declare @llvm.riscv.vdiv.nxv16i16.nxv16i16( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -536,7 +538,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -546,10 +548,10 @@ declare @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -561,7 +563,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) 
ret %a } @@ -570,9 +572,9 @@ declare @llvm.riscv.vdiv.nxv32i16.nxv32i16( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -583,7 +585,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -593,10 +595,10 @@ declare @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -609,7 +611,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -618,9 +620,9 @@ declare @llvm.riscv.vdiv.nxv1i32.nxv1i32( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -631,7 +633,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -641,10 +643,10 @@ declare @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -656,7 +658,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -665,9 +667,9 @@ declare @llvm.riscv.vdiv.nxv2i32.nxv2i32( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -678,7 +680,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -688,10 +690,10 @@ declare @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -703,7 +705,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -712,9 +714,9 @@ declare @llvm.riscv.vdiv.nxv4i32.nxv4i32( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -725,7 +727,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -735,10 +737,10 @@ declare @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, 
%1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -750,7 +752,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -759,9 +761,9 @@ declare @llvm.riscv.vdiv.nxv8i32.nxv8i32( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -772,7 +774,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -782,10 +784,10 @@ declare @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -797,7 +799,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vdiv.nxv16i32.nxv16i32( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -819,7 +821,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -829,10 +831,10 @@ declare @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -845,7 +847,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -854,9 +856,9 @@ declare @llvm.riscv.vdiv.nxv1i64.nxv1i64( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -867,7 +869,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -877,10 +879,10 @@ declare @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -892,7 +894,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -901,9 +903,9 @@ declare @llvm.riscv.vdiv.nxv2i64.nxv2i64( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -914,7 +916,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -924,10 +926,10 @@ declare @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -939,7 +941,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vdiv.nxv4i64.nxv4i64( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -961,7 +963,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -971,10 +973,10 @@ declare @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -986,7 +988,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -995,9 +997,9 @@ declare @llvm.riscv.vdiv.nxv8i64.nxv8i64( , , , - i32); + iXLen); -define @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -1008,7 +1010,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -1018,10 +1020,10 @@ declare @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -1034,7 +1036,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1043,9 +1045,9 @@ declare @llvm.riscv.vdiv.nxv1i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -1056,7 +1058,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1066,10 +1068,10 @@ declare @llvm.riscv.vdiv.mask.nxv1i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu @@ -1081,7 +1083,7 @@ entry: %1, i8 %2, %3, - i32 %4, 
i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1090,9 +1092,9 @@ declare @llvm.riscv.vdiv.nxv2i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -1103,7 +1105,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1113,10 +1115,10 @@ declare @llvm.riscv.vdiv.mask.nxv2i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu @@ -1128,7 +1130,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1137,9 +1139,9 @@ declare @llvm.riscv.vdiv.nxv4i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1150,7 +1152,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1160,10 +1162,10 @@ declare @llvm.riscv.vdiv.mask.nxv4i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu @@ -1175,7 +1177,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1184,9 +1186,9 @@ declare @llvm.riscv.vdiv.nxv8i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -1197,7 +1199,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1207,10 +1209,10 @@ declare @llvm.riscv.vdiv.mask.nxv8i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu @@ -1222,7 +1224,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1231,9 +1233,9 @@ declare @llvm.riscv.vdiv.nxv16i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1244,7 +1246,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1254,10 +1256,10 @@ declare @llvm.riscv.vdiv.mask.nxv16i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define 
@intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu @@ -1269,7 +1271,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1278,9 +1280,9 @@ declare @llvm.riscv.vdiv.nxv32i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -1291,7 +1293,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1301,10 +1303,10 @@ declare @llvm.riscv.vdiv.mask.nxv32i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu @@ -1316,7 +1318,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1325,9 +1327,9 @@ declare @llvm.riscv.vdiv.nxv64i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma @@ -1338,7 +1340,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1348,10 +1350,10 @@ declare @llvm.riscv.vdiv.mask.nxv64i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu @@ -1363,7 +1365,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1372,9 +1374,9 @@ declare @llvm.riscv.vdiv.nxv1i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -1385,7 +1387,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1395,10 +1397,10 @@ declare @llvm.riscv.vdiv.mask.nxv1i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu @@ -1410,7 +1412,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1419,9 +1421,9 @@ declare @llvm.riscv.vdiv.nxv2i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a1, e16, mf2, ta, ma @@ -1432,7 +1434,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1442,10 +1444,10 @@ declare @llvm.riscv.vdiv.mask.nxv2i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu @@ -1457,7 +1459,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1466,9 +1468,9 @@ declare @llvm.riscv.vdiv.nxv4i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1479,7 +1481,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1489,10 +1491,10 @@ declare @llvm.riscv.vdiv.mask.nxv4i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -1504,7 +1506,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1513,9 +1515,9 @@ declare @llvm.riscv.vdiv.nxv8i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1526,7 +1528,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1536,10 +1538,10 @@ declare @llvm.riscv.vdiv.mask.nxv8i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -1551,7 +1553,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1560,9 +1562,9 @@ declare @llvm.riscv.vdiv.nxv16i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -1573,7 +1575,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1583,10 +1585,10 @@ declare @llvm.riscv.vdiv.mask.nxv16i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu @@ -1598,7 +1600,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen 
%4, iXLen 1) ret %a } @@ -1607,9 +1609,9 @@ declare @llvm.riscv.vdiv.nxv32i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma @@ -1620,7 +1622,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1630,10 +1632,10 @@ declare @llvm.riscv.vdiv.mask.nxv32i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu @@ -1645,7 +1647,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1654,9 +1656,9 @@ declare @llvm.riscv.vdiv.nxv1i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1667,7 +1669,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1677,10 +1679,10 @@ declare @llvm.riscv.vdiv.mask.nxv1i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu @@ -1692,7 +1694,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1701,9 +1703,9 @@ declare @llvm.riscv.vdiv.nxv2i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1714,7 +1716,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1724,10 +1726,10 @@ declare @llvm.riscv.vdiv.mask.nxv2i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu @@ -1739,7 +1741,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1748,9 +1750,9 @@ declare @llvm.riscv.vdiv.nxv4i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1761,7 +1763,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1771,10 +1773,10 @@ declare @llvm.riscv.vdiv.mask.nxv4i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1786,7 +1788,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1795,9 +1797,9 @@ declare @llvm.riscv.vdiv.nxv8i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1808,7 +1810,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1818,10 +1820,10 @@ declare @llvm.riscv.vdiv.mask.nxv8i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1833,7 +1835,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1842,9 +1844,9 @@ declare @llvm.riscv.vdiv.nxv16i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma @@ -1855,7 +1857,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1865,10 +1867,10 @@ declare @llvm.riscv.vdiv.mask.nxv16i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu @@ -1880,7 +1882,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1889,26 +1891,32 @@ declare @llvm.riscv.vdiv.nxv1i64.i64( , , i64, - i32); - -define @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vdiv.vv v8, v8, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vdiv.vv v8, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vdiv.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a 
= call @llvm.riscv.vdiv.nxv1i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1918,28 +1926,34 @@ declare @llvm.riscv.vdiv.mask.nxv1i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vdiv.vv v8, v9, v10, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vdiv.vx v8, v9, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vdiv.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1948,26 +1962,32 @@ declare @llvm.riscv.vdiv.nxv2i64.i64( , , i64, - i32); - -define @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vdiv.vv v8, v8, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vdiv.vv v8, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vdiv.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vdiv.nxv2i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1977,28 +1997,34 @@ declare @llvm.riscv.vdiv.mask.nxv2i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; 
RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vdiv.vv v8, v10, v12, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vdiv.vx v8, v10, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vdiv.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -2007,26 +2033,32 @@ declare @llvm.riscv.vdiv.nxv4i64.i64( , , i64, - i32); - -define @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vdiv.vv v8, v8, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vdiv.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vdiv.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vdiv.nxv4i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -2036,28 +2068,34 @@ declare @llvm.riscv.vdiv.mask.nxv4i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vdiv.vv v8, v12, v16, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vdiv.vx v8, v12, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vdiv.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -2066,26 +2104,32 @@ declare @llvm.riscv.vdiv.nxv8i64.i64( , , i64, - i32); - -define @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 
8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vdiv.vv v8, v8, v16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vdiv.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vdiv.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vdiv.nxv8i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -2095,28 +2139,34 @@ declare @llvm.riscv.vdiv.mask.nxv8i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vdiv.vv v8, v16, v24, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vdiv.vx v8, v16, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vdiv.mask.nxv8i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll deleted file mode 100644 index a93f01e..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll +++ /dev/null @@ -1,2074 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vdivu.nxv1i8.nxv1i8( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv1i8.nxv1i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vdivu.mask.nxv1i8.nxv1i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv2i8.nxv2i8( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv2i8.nxv2i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv4i8.nxv4i8( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv4i8.nxv4i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv8i8.nxv8i8( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv8i8.nxv8i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv16i8.nxv16i8( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv16i8.nxv16i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv32i8.nxv32i8( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv32i8.nxv32i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv64i8.nxv64i8( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv64i8.nxv64i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv1i16.nxv1i16( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv1i16.nxv1i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv2i16.nxv2i16( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv2i16.nxv2i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16( - , - 
, - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv4i16.nxv4i16( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv4i16.nxv4i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv8i16.nxv8i16( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv8i16.nxv8i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv16i16.nxv16i16( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv16i16.nxv16i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv32i16.nxv32i16( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, 
ma -; CHECK-NEXT: vdivu.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv32i16.nxv32i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv1i32.nxv1i32( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv1i32.nxv1i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv2i32.nxv2i32( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv2i32.nxv2i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv4i32.nxv4i32( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv4i32.nxv4i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv8i32.nxv8i32( - 
, - , - , - i64); - -define @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv8i32.nxv8i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv16i32.nxv16i32( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv16i32.nxv16i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv1i64.nxv1i64( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv1i64.nxv1i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv2i64.nxv2i64( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv2i64.nxv2i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a0, e64, m2, ta, mu -; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv4i64.nxv4i64( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv4i64.nxv4i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv8i64.nxv8i64( - , - , - , - i64); - -define @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vdivu.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv8i64.nxv8i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv1i8.i8( - , - , - i8, - i64); - -define @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv1i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv1i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv2i8.i8( - , - , - i8, - i64); - -define @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv2i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv2i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8( 
%0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv4i8.i8( - , - , - i8, - i64); - -define @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv4i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv4i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv8i8.i8( - , - , - i8, - i64); - -define @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv8i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv8i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv16i8.i8( - , - , - i8, - i64); - -define @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv16i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv16i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vdivu.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv32i8.i8( - , - , - i8, - i64); - -define @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv32i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv32i8.i8( - , - , - i8, - , - i64, - i64); - 
-define @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vdivu.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv64i8.i8( - , - , - i8, - i64); - -define @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv64i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv64i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vdivu.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv64i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv1i16.i16( - , - , - i16, - i64); - -define @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv1i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv1i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv2i16.i16( - , - , - i16, - i64); - -define @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv2i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv2i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv4i16.i16( - , - , - i16, - i64); - -define @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv4i16.i16( - undef, - 
%0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv4i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv8i16.i16( - , - , - i16, - i64); - -define @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv8i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv8i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vdivu.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv16i16.i16( - , - , - i16, - i64); - -define @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv16i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv16i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vdivu.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv32i16.i16( - , - , - i16, - i64); - -define @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv32i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv32i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vdivu.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv32i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv1i32.i32( - , - , - i32, - i64); - -define @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv1i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv1i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv2i32.i32( - , - , - i32, - i64); - -define @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv2i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv2i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv4i32.i32( - , - , - i32, - i64); - -define @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv4i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv4i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vdivu.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv8i32.i32( - , - , - i32, - i64); - -define @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv8i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv8i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vdivu.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv16i32.i32( - , - , - i32, - i64); - -define 
@intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv16i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv16i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vdivu.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv16i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv1i64.i64( - , - , - i64, - i64); - -define @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv1i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv1i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv2i64.i64( - , - , - i64, - i64); - -define @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv2i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv2i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vdivu.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv4i64.i64( - , - , - i64, - i64); - -define @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv4i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv4i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vdivu.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vdivu.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vdivu.nxv8i64.i64( - , - , - i64, - i64); - -define @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vdivu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.nxv8i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vdivu.mask.nxv8i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vdivu.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vdivu.mask.nxv8i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu.ll similarity index 80% rename from llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vdivu.ll index 5d8cc72..236e35b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vdivu.nxv1i8.nxv1i8( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -18,7 +20,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -28,10 +30,10 @@ declare @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -43,7 +45,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -52,9 +54,9 @@ declare @llvm.riscv.vdivu.nxv2i8.nxv2i8( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -65,7 +67,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -75,10 +77,10 @@ declare @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -90,7 +92,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -99,9 +101,9 @@ declare @llvm.riscv.vdivu.nxv4i8.nxv4i8( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -112,7 +114,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -122,10 +124,10 @@ declare @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -137,7 +139,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -146,9 +148,9 @@ declare @llvm.riscv.vdivu.nxv8i8.nxv8i8( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -159,7 +161,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -169,10 +171,10 @@ declare @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -184,7 +186,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -193,9 +195,9 @@ declare @llvm.riscv.vdivu.nxv16i8.nxv16i8( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -206,7 +208,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -216,10 +218,10 @@ declare @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -231,7 +233,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -240,9 +242,9 @@ declare @llvm.riscv.vdivu.nxv32i8.nxv32i8( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -253,7 +255,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -263,10 +265,10 @@ declare 
@llvm.riscv.vdivu.mask.nxv32i8.nxv32i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -278,7 +280,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -287,9 +289,9 @@ declare @llvm.riscv.vdivu.nxv64i8.nxv64i8( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -300,7 +302,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) @@ -326,7 +328,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -335,9 +337,9 @@ declare @llvm.riscv.vdivu.nxv1i16.nxv1i16( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -348,7 +350,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -358,10 +360,10 @@ declare @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -373,7 +375,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -382,9 +384,9 @@ declare @llvm.riscv.vdivu.nxv2i16.nxv2i16( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -395,7 +397,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -405,10 +407,10 @@ declare @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -420,7 +422,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -429,9 +431,9 @@ declare @llvm.riscv.vdivu.nxv4i16.nxv4i16( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16( 
%0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -442,7 +444,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -452,10 +454,10 @@ declare @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -467,7 +469,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -476,9 +478,9 @@ declare @llvm.riscv.vdivu.nxv8i16.nxv8i16( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -489,7 +491,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -499,10 +501,10 @@ declare @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -514,7 +516,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -523,9 +525,9 @@ declare @llvm.riscv.vdivu.nxv16i16.nxv16i16( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -536,7 +538,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -546,10 +548,10 @@ declare @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -561,7 +563,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -570,9 +572,9 @@ declare @llvm.riscv.vdivu.nxv32i16.nxv32i16( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -583,7 +585,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -593,10 +595,10 @@ declare @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16( 
%0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -609,7 +611,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -618,9 +620,9 @@ declare @llvm.riscv.vdivu.nxv1i32.nxv1i32( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -631,7 +633,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -641,10 +643,10 @@ declare @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -656,7 +658,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -665,9 +667,9 @@ declare @llvm.riscv.vdivu.nxv2i32.nxv2i32( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -678,7 +680,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -688,10 +690,10 @@ declare @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -703,7 +705,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -712,9 +714,9 @@ declare @llvm.riscv.vdivu.nxv4i32.nxv4i32( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -725,7 +727,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -735,10 +737,10 @@ declare @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -750,7 +752,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -759,9 +761,9 @@ declare @llvm.riscv.vdivu.nxv8i32.nxv8i32( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a0, e32, m4, ta, ma @@ -772,7 +774,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -782,10 +784,10 @@ declare @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -797,7 +799,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vdivu.nxv16i32.nxv16i32( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -819,7 +821,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -829,10 +831,10 @@ declare @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -845,7 +847,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -854,9 +856,9 @@ declare @llvm.riscv.vdivu.nxv1i64.nxv1i64( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -867,7 +869,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -877,10 +879,10 @@ declare @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -892,7 +894,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -901,9 +903,9 @@ declare @llvm.riscv.vdivu.nxv2i64.nxv2i64( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -914,7 +916,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -924,10 +926,10 @@ declare @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -939,7 +941,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) 
ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vdivu.nxv4i64.nxv4i64( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -961,7 +963,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -971,10 +973,10 @@ declare @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -986,7 +988,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -995,9 +997,9 @@ declare @llvm.riscv.vdivu.nxv8i64.nxv8i64( , , , - i32); + iXLen); -define @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -1008,7 +1010,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -1018,10 +1020,10 @@ declare @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -1034,7 +1036,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1043,9 +1045,9 @@ declare @llvm.riscv.vdivu.nxv1i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -1056,7 +1058,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1066,10 +1068,10 @@ declare @llvm.riscv.vdivu.mask.nxv1i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu @@ -1081,7 +1083,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1090,9 +1092,9 @@ declare @llvm.riscv.vdivu.nxv2i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -1103,7 +1105,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1113,10 +1115,10 @@ declare @llvm.riscv.vdivu.mask.nxv2i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { 
+define @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu @@ -1128,7 +1130,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1137,9 +1139,9 @@ declare @llvm.riscv.vdivu.nxv4i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1150,7 +1152,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1160,10 +1162,10 @@ declare @llvm.riscv.vdivu.mask.nxv4i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu @@ -1175,7 +1177,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1184,9 +1186,9 @@ declare @llvm.riscv.vdivu.nxv8i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -1197,7 +1199,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1207,10 +1209,10 @@ declare @llvm.riscv.vdivu.mask.nxv8i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu @@ -1222,7 +1224,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1231,9 +1233,9 @@ declare @llvm.riscv.vdivu.nxv16i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1244,7 +1246,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1254,10 +1256,10 @@ declare @llvm.riscv.vdivu.mask.nxv16i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu @@ -1269,7 +1271,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1278,9 +1280,9 @@ declare @llvm.riscv.vdivu.nxv32i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, 
ta, ma @@ -1291,7 +1293,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1301,10 +1303,10 @@ declare @llvm.riscv.vdivu.mask.nxv32i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu @@ -1316,7 +1318,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1325,9 +1327,9 @@ declare @llvm.riscv.vdivu.nxv64i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma @@ -1338,7 +1340,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1348,10 +1350,10 @@ declare @llvm.riscv.vdivu.mask.nxv64i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu @@ -1363,7 +1365,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1372,9 +1374,9 @@ declare @llvm.riscv.vdivu.nxv1i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -1385,7 +1387,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1395,10 +1397,10 @@ declare @llvm.riscv.vdivu.mask.nxv1i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu @@ -1410,7 +1412,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1419,9 +1421,9 @@ declare @llvm.riscv.vdivu.nxv2i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -1432,7 +1434,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1442,10 +1444,10 @@ declare @llvm.riscv.vdivu.mask.nxv2i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu @@ -1457,7 +1459,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1466,9 
+1468,9 @@ declare @llvm.riscv.vdivu.nxv4i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1479,7 +1481,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1489,10 +1491,10 @@ declare @llvm.riscv.vdivu.mask.nxv4i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -1504,7 +1506,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1513,9 +1515,9 @@ declare @llvm.riscv.vdivu.nxv8i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1526,7 +1528,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1536,10 +1538,10 @@ declare @llvm.riscv.vdivu.mask.nxv8i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -1551,7 +1553,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1560,9 +1562,9 @@ declare @llvm.riscv.vdivu.nxv16i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -1573,7 +1575,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1583,10 +1585,10 @@ declare @llvm.riscv.vdivu.mask.nxv16i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu @@ -1598,7 +1600,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1607,9 +1609,9 @@ declare @llvm.riscv.vdivu.nxv32i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma @@ -1620,7 +1622,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1630,10 +1632,10 @@ declare @llvm.riscv.vdivu.mask.nxv32i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu @@ -1645,7 +1647,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1654,9 +1656,9 @@ declare @llvm.riscv.vdivu.nxv1i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1667,7 +1669,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1677,10 +1679,10 @@ declare @llvm.riscv.vdivu.mask.nxv1i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu @@ -1692,7 +1694,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1701,9 +1703,9 @@ declare @llvm.riscv.vdivu.nxv2i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1714,7 +1716,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1724,10 +1726,10 @@ declare @llvm.riscv.vdivu.mask.nxv2i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu @@ -1739,7 +1741,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1748,9 +1750,9 @@ declare @llvm.riscv.vdivu.nxv4i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1761,7 +1763,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1771,10 +1773,10 @@ declare @llvm.riscv.vdivu.mask.nxv4i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1786,7 +1788,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1795,9 +1797,9 @@ declare @llvm.riscv.vdivu.nxv8i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +define 
@intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1808,7 +1810,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1818,10 +1820,10 @@ declare @llvm.riscv.vdivu.mask.nxv8i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1833,7 +1835,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1842,9 +1844,9 @@ declare @llvm.riscv.vdivu.nxv16i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma @@ -1855,7 +1857,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1865,10 +1867,10 @@ declare @llvm.riscv.vdivu.mask.nxv16i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu @@ -1880,7 +1882,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1889,26 +1891,32 @@ declare @llvm.riscv.vdivu.nxv1i64.i64( , , i64, - i32); - -define @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vdivu.vv v8, v8, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vdivu.vv v8, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vdivu.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv1i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1918,28 +1926,34 @@ declare @llvm.riscv.vdivu.mask.nxv1i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: 
vlse64.v v10, (a0), zero -; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vdivu.vv v8, v9, v10, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vdivu.vx v8, v9, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vdivu.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1948,26 +1962,32 @@ declare @llvm.riscv.vdivu.nxv2i64.i64( , , i64, - i32); - -define @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vdivu.vv v8, v8, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vdivu.vv v8, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vdivu.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv2i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1977,28 +1997,34 @@ declare @llvm.riscv.vdivu.mask.nxv2i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vdivu.vv v8, v10, v12, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vdivu.vx v8, v10, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vdivu.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - 
i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -2007,26 +2033,32 @@ declare @llvm.riscv.vdivu.nxv4i64.i64( , , i64, - i32); - -define @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vdivu.vv v8, v8, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vdivu.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vdivu.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv4i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -2036,28 +2068,34 @@ declare @llvm.riscv.vdivu.mask.nxv4i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vdivu.vv v8, v12, v16, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vdivu.vx v8, v12, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vdivu.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -2066,26 +2104,32 @@ declare @llvm.riscv.vdivu.nxv8i64.i64( , , i64, - i32); - -define @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vdivu.vv v8, v8, v16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; 
RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vdivu.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vdivu.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv8i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -2095,28 +2139,34 @@ declare @llvm.riscv.vdivu.mask.nxv8i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vdivu.vv v8, v16, v24, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vdivu.vx v8, v16, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vdivu.mask.nxv8i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll deleted file mode 100644 index c13eeb8..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll +++ /dev/null @@ -1,2029 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+experimental-zvfh,+f,+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare { , i32 } @llvm.riscv.vleff.nxv1i64( - , - *, - i32); - -define @intrinsic_vleff_v_nxv1i64_nxv1i64(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vle64ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv1i64( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv1i64( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv1i64_nxv1i64( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vle64ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv1i64( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv2i64( - , - *, - i32); - -define 
@intrinsic_vleff_v_nxv2i64_nxv2i64(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vle64ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv2i64( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv2i64( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv2i64_nxv2i64( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vle64ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv2i64( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv4i64( - , - *, - i32); - -define @intrinsic_vleff_v_nxv4i64_nxv4i64(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vle64ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv4i64( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv4i64( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv4i64_nxv4i64( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vle64ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv4i64( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv8i64( - , - *, - i32); - -define @intrinsic_vleff_v_nxv8i64_nxv8i64(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vle64ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv8i64( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv8i64( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv8i64_nxv8i64( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vle64ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv8i64( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } 
@llvm.riscv.vleff.nxv1f64( - , - *, - i32); - -define @intrinsic_vleff_v_nxv1f64_nxv1f64(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vle64ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv1f64( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv1f64( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv1f64_nxv1f64( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vle64ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv1f64( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv2f64( - , - *, - i32); - -define @intrinsic_vleff_v_nxv2f64_nxv2f64(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vle64ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv2f64( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv2f64( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv2f64_nxv2f64( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vle64ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv2f64( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv4f64( - , - *, - i32); - -define @intrinsic_vleff_v_nxv4f64_nxv4f64(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vle64ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv4f64( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv4f64( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv4f64_nxv4f64( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vle64ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv4f64( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store 
i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv8f64( - , - *, - i32); - -define @intrinsic_vleff_v_nxv8f64_nxv8f64(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vle64ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv8f64( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv8f64( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv8f64_nxv8f64( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vle64ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv8f64( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv1i32( - , - *, - i32); - -define @intrinsic_vleff_v_nxv1i32_nxv1i32(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vle32ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv1i32( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv1i32( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv1i32_nxv1i32( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vle32ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv1i32( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv2i32( - , - *, - i32); - -define @intrinsic_vleff_v_nxv2i32_nxv2i32(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vle32ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv2i32( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv2i32( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv2i32_nxv2i32( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vle32ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv2i32( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , 
i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv4i32( - , - *, - i32); - -define @intrinsic_vleff_v_nxv4i32_nxv4i32(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vle32ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv4i32( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv4i32( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv4i32_nxv4i32( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vle32ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv4i32( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv8i32( - , - *, - i32); - -define @intrinsic_vleff_v_nxv8i32_nxv8i32(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vle32ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv8i32( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv8i32( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv8i32_nxv8i32( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vle32ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv8i32( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv16i32( - , - *, - i32); - -define @intrinsic_vleff_v_nxv16i32_nxv16i32(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vle32ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv16i32( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv16i32( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv16i32_nxv16i32( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vle32ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } 
@llvm.riscv.vleff.mask.nxv16i32( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv1f32( - , - *, - i32); - -define @intrinsic_vleff_v_nxv1f32_nxv1f32(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vle32ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv1f32( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv1f32( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv1f32_nxv1f32( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vle32ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv1f32( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv2f32( - , - *, - i32); - -define @intrinsic_vleff_v_nxv2f32_nxv2f32(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vle32ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv2f32( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv2f32( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv2f32_nxv2f32( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vle32ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv2f32( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv4f32( - , - *, - i32); - -define @intrinsic_vleff_v_nxv4f32_nxv4f32(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vle32ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv4f32( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv4f32( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv4f32_nxv4f32( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vle32ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 
0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv4f32( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv8f32( - , - *, - i32); - -define @intrinsic_vleff_v_nxv8f32_nxv8f32(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vle32ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv8f32( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv8f32( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv8f32_nxv8f32( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vle32ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv8f32( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv16f32( - , - *, - i32); - -define @intrinsic_vleff_v_nxv16f32_nxv16f32(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vle32ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv16f32( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv16f32( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv16f32_nxv16f32( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vle32ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv16f32( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv1i16( - , - *, - i32); - -define @intrinsic_vleff_v_nxv1i16_nxv1i16(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv1i16( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv1i16( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv1i16_nxv1i16( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: 
vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv1i16( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv2i16( - , - *, - i32); - -define @intrinsic_vleff_v_nxv2i16_nxv2i16(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv2i16( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv2i16( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv2i16_nxv2i16( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv2i16( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv4i16( - , - *, - i32); - -define @intrinsic_vleff_v_nxv4i16_nxv4i16(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv4i16( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv4i16( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv4i16_nxv4i16( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv4i16( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv8i16( - , - *, - i32); - -define @intrinsic_vleff_v_nxv8i16_nxv8i16(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv8i16( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv8i16( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv8i16_nxv8i16( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv8i16( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv16i16( - , - *, - i32); - -define @intrinsic_vleff_v_nxv16i16_nxv16i16(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv16i16( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv16i16( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv16i16_nxv16i16( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv16i16( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv32i16( - , - *, - i32); - -define @intrinsic_vleff_v_nxv32i16_nxv32i16(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv32i16( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv32i16( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv32i16_nxv32i16( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv32i16( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv1f16( - , - *, - i32); - -define @intrinsic_vleff_v_nxv1half_nxv1f16(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv1f16( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv1f16( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv1half_nxv1f16( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; 
CHECK-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv1f16( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv2f16( - , - *, - i32); - -define @intrinsic_vleff_v_nxv2half_nxv2f16(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv2f16( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv2f16( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv2half_nxv2f16( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv2f16( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv4f16( - , - *, - i32); - -define @intrinsic_vleff_v_nxv4half_nxv4f16(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv4f16( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv4f16( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv4half_nxv4f16( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv4f16( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv8f16( - , - *, - i32); - -define @intrinsic_vleff_v_nxv8half_nxv8f16(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv8f16( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv8f16( - , - *, - , - i32, - i32); - -define 
@intrinsic_vleff_mask_v_nxv8half_nxv8f16( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv8f16( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv16f16( - , - *, - i32); - -define @intrinsic_vleff_v_nxv16half_nxv16f16(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv16f16( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv16f16( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv16half_nxv16f16( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv16f16( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv32f16( - , - *, - i32); - -define @intrinsic_vleff_v_nxv32half_nxv32f16(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv32f16( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv32f16( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv32half_nxv32f16( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv32f16( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv1i8( - , - *, - i32); - -define @intrinsic_vleff_v_nxv1i8_nxv1i8(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vle8ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv1i8( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { 
, i32 } @llvm.riscv.vleff.mask.nxv1i8( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv1i8_nxv1i8( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vle8ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv1i8( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv2i8( - , - *, - i32); - -define @intrinsic_vleff_v_nxv2i8_nxv2i8(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vle8ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv2i8( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv2i8( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv2i8_nxv2i8( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vle8ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv2i8( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv4i8( - , - *, - i32); - -define @intrinsic_vleff_v_nxv4i8_nxv4i8(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vle8ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv4i8( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv4i8( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv4i8_nxv4i8( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vle8ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv4i8( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv8i8( - , - *, - i32); - -define @intrinsic_vleff_v_nxv8i8_nxv8i8(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vle8ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv8i8( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - 
-declare { , i32 } @llvm.riscv.vleff.mask.nxv8i8( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv8i8_nxv8i8( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vle8ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv8i8( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv16i8( - , - *, - i32); - -define @intrinsic_vleff_v_nxv16i8_nxv16i8(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vle8ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv16i8( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv16i8( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv16i8_nxv16i8( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vle8ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv16i8( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv32i8( - , - *, - i32); - -define @intrinsic_vleff_v_nxv32i8_nxv32i8(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vle8ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv32i8( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv32i8( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv32i8_nxv32i8( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vle8ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv32i8( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.nxv64i8( - , - *, - i32); - -define @intrinsic_vleff_v_nxv64i8_nxv64i8(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vle8ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv64i8( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - 
store i32 %c, i32* %2 - ret %b -} - -declare { , i32 } @llvm.riscv.vleff.mask.nxv64i8( - , - *, - , - i32, - i32); - -define @intrinsic_vleff_mask_v_nxv64i8_nxv64i8( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vle8ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv64i8( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - %c = extractvalue { , i32 } %a, 1 - store i32 %c, i32* %4 - - ret %b -} - -; Test with the VL output unused -define @intrinsic_vleff_dead_vl(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_dead_vl: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vle64ff.v v8, (a0) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv1f64( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 0 - ret %b -} - -define @intrinsic_vleff_mask_dead_vl( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_dead_vl: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vle64ff.v v8, (a0), v0.t -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv1f64( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 0 - - ret %b -} - -; Test with the loaded value unused -define void @intrinsic_vleff_dead_value(* %0, i32 %1, i32* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_dead_value: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vle64ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.nxv1f64( - undef, - * %0, - i32 %1) - %b = extractvalue { , i32 } %a, 1 - store i32 %b, i32* %2 - ret void -} - -define void @intrinsic_vleff_mask_dead_value( %0, * %1, %2, i32 %3, i32* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_dead_value: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vle64ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sw a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i32 } @llvm.riscv.vleff.mask.nxv1f64( - %0, - * %1, - %2, - i32 %3, i32 1) - %b = extractvalue { , i32 } %a, 1 - store i32 %b, i32* %4 - - ret void -} - -; Test with both outputs dead. Make sure the vleff isn't deleted. 
-define void @intrinsic_vleff_dead_all(<vscale x 1 x double>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_dead_all:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.nxv1f64(
-    <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
-    i32 %1)
-  ret void
-}
-
-define void @intrinsic_vleff_mask_dead_all(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_dead_all:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret void
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll
deleted file mode 100644
index 3eefd9a..0000000
--- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll
+++ /dev/null
@@ -1,2029 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+experimental-zvfh,+f,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vleff_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 1 x i64>, i64 } %a, 0
-  %c = extractvalue { <vscale x 1 x i64>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 1 x i64> %b
-}
-
-declare { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.mask.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.mask.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 1 x i64>, i64 } %a, 0
-  %c = extractvalue { <vscale x 1 x i64>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 1 x i64> %b
-}
-
-declare { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vleff_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 2 x i64>, i64 } %a, 0
-  %c = extractvalue { <vscale x 2 x i64>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 2 x i64> %b
-}
-
-declare { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.mask.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vleff_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 2 x i64>, i64 } %a, 0
-  %c = extractvalue { <vscale x 2 x i64>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 2 x i64> %b
-}
- -declare { , i64 } @llvm.riscv.vleff.nxv4i64( - , - *, - i64); - -define @intrinsic_vleff_v_nxv4i64_nxv4i64(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vle64ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv4i64( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv4i64( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv4i64_nxv4i64( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vle64ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv4i64( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv8i64( - , - *, - i64); - -define @intrinsic_vleff_v_nxv8i64_nxv8i64(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vle64ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv8i64( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv8i64( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv8i64_nxv8i64( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vle64ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv8i64( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv1f64( - , - *, - i64); - -define @intrinsic_vleff_v_nxv1f64_nxv1f64(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vle64ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv1f64( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv1f64( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv1f64_nxv1f64( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vle64ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv1f64( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { 
, i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv2f64( - , - *, - i64); - -define @intrinsic_vleff_v_nxv2f64_nxv2f64(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vle64ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv2f64( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv2f64( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv2f64_nxv2f64( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vle64ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv2f64( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv4f64( - , - *, - i64); - -define @intrinsic_vleff_v_nxv4f64_nxv4f64(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vle64ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv4f64( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv4f64( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv4f64_nxv4f64( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vle64ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv4f64( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv8f64( - , - *, - i64); - -define @intrinsic_vleff_v_nxv8f64_nxv8f64(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vle64ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv8f64( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv8f64( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv8f64_nxv8f64( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vle64ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv8f64( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = 
extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv1i32( - , - *, - i64); - -define @intrinsic_vleff_v_nxv1i32_nxv1i32(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vle32ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv1i32( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv1i32( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv1i32_nxv1i32( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vle32ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv2i32( - , - *, - i64); - -define @intrinsic_vleff_v_nxv2i32_nxv2i32(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vle32ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv2i32( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv2i32( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv2i32_nxv2i32( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vle32ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv2i32( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv4i32( - , - *, - i64); - -define @intrinsic_vleff_v_nxv4i32_nxv4i32(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vle32ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv4i32( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv4i32( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv4i32_nxv4i32( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vle32ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } 
@llvm.riscv.vleff.mask.nxv4i32( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv8i32( - , - *, - i64); - -define @intrinsic_vleff_v_nxv8i32_nxv8i32(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vle32ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv8i32( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv8i32( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv8i32_nxv8i32( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vle32ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv16i32( - , - *, - i64); - -define @intrinsic_vleff_v_nxv16i32_nxv16i32(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vle32ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv16i32( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv16i32( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv16i32_nxv16i32( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vle32ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv1f32( - , - *, - i64); - -define @intrinsic_vleff_v_nxv1f32_nxv1f32(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vle32ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv1f32( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv1f32( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv1f32_nxv1f32( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vle32ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; 
CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv1f32( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv2f32( - , - *, - i64); - -define @intrinsic_vleff_v_nxv2f32_nxv2f32(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vle32ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv2f32( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv2f32( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv2f32_nxv2f32( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vle32ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv2f32( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv4f32( - , - *, - i64); - -define @intrinsic_vleff_v_nxv4f32_nxv4f32(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vle32ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv4f32( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv4f32( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv4f32_nxv4f32( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vle32ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv4f32( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv8f32( - , - *, - i64); - -define @intrinsic_vleff_v_nxv8f32_nxv8f32(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vle32ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv8f32( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv8f32( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv8f32_nxv8f32( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: 
vle32ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv8f32( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv16f32( - , - *, - i64); - -define @intrinsic_vleff_v_nxv16f32_nxv16f32(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vle32ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv16f32( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv16f32( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv16f32_nxv16f32( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vle32ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv16f32( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv1i16( - , - *, - i64); - -define @intrinsic_vleff_v_nxv1i16_nxv1i16(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv1i16( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv1i16( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv1i16_nxv1i16( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv2i16( - , - *, - i64); - -define @intrinsic_vleff_v_nxv2i16_nxv2i16(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv2i16( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv2i16( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv2i16_nxv2i16( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv2i16( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv4i16( - , - *, - i64); - -define @intrinsic_vleff_v_nxv4i16_nxv4i16(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv4i16( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv4i16( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv4i16_nxv4i16( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv4i16( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv8i16( - , - *, - i64); - -define @intrinsic_vleff_v_nxv8i16_nxv8i16(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv8i16( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv8i16( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv8i16_nxv8i16( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv8i16( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv16i16( - , - *, - i64); - -define @intrinsic_vleff_v_nxv16i16_nxv16i16(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv16i16( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv16i16( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv16i16_nxv16i16( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: 
intrinsic_vleff_mask_v_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv16i16( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv32i16( - , - *, - i64); - -define @intrinsic_vleff_v_nxv32i16_nxv32i16(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv32i16( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv32i16( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv32i16_nxv32i16( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv32i16( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv1f16( - , - *, - i64); - -define @intrinsic_vleff_v_nxv1half_nxv1f16(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv1f16( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv1f16( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv1half_nxv1f16( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv1f16( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv2f16( - , - *, - i64); - -define @intrinsic_vleff_v_nxv2half_nxv2f16(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv2f16( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv2f16( - , - *, - , - i64, - i64); - -define 
@intrinsic_vleff_mask_v_nxv2half_nxv2f16( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv2f16( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv4f16( - , - *, - i64); - -define @intrinsic_vleff_v_nxv4half_nxv4f16(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv4f16( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv4f16( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv4half_nxv4f16( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv4f16( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv8f16( - , - *, - i64); - -define @intrinsic_vleff_v_nxv8half_nxv8f16(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv8f16( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv8f16( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv8half_nxv8f16( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv8f16( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv16f16( - , - *, - i64); - -define @intrinsic_vleff_v_nxv16half_nxv16f16(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv16f16( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } 
@llvm.riscv.vleff.mask.nxv16f16( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv16half_nxv16f16( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv16f16( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv32f16( - , - *, - i64); - -define @intrinsic_vleff_v_nxv32half_nxv32f16(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vle16ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv32f16( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv32f16( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv32half_nxv32f16( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vle16ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv32f16( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv1i8( - , - *, - i64); - -define @intrinsic_vleff_v_nxv1i8_nxv1i8(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vle8ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv1i8( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv1i8( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv1i8_nxv1i8( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vle8ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv2i8( - , - *, - i64); - -define @intrinsic_vleff_v_nxv2i8_nxv2i8(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vle8ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv2i8( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - 
store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv2i8( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv2i8_nxv2i8( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vle8ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv2i8( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv4i8( - , - *, - i64); - -define @intrinsic_vleff_v_nxv4i8_nxv4i8(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vle8ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv4i8( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv4i8( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv4i8_nxv4i8( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vle8ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv4i8( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv8i8( - , - *, - i64); - -define @intrinsic_vleff_v_nxv8i8_nxv8i8(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vle8ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv8i8( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv8i8( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv8i8_nxv8i8( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vle8ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv8i8( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv16i8( - , - *, - i64); - -define @intrinsic_vleff_v_nxv16i8_nxv16i8(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vle8ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv16i8( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , 
i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv16i8( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv16i8_nxv16i8( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vle8ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv16i8( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv32i8( - , - *, - i64); - -define @intrinsic_vleff_v_nxv32i8_nxv32i8(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vle8ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv32i8( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv32i8( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv32i8_nxv32i8( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vle8ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv32i8( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.nxv64i8( - , - *, - i64); - -define @intrinsic_vleff_v_nxv64i8_nxv64i8(* %0, i64 %1, i64* %2) nounwind { -; CHECK-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vle8ff.v v8, (a0) -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv64i8( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %2 - ret %b -} - -declare { , i64 } @llvm.riscv.vleff.mask.nxv64i8( - , - *, - , - i64, - i64); - -define @intrinsic_vleff_mask_v_nxv64i8_nxv64i8( %0, * %1, %2, i64 %3, i64* %4) nounwind { -; CHECK-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vle8ff.v v8, (a0), v0.t -; CHECK-NEXT: csrr a0, vl -; CHECK-NEXT: sd a0, 0(a2) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.mask.nxv64i8( - %0, - * %1, - %2, - i64 %3, i64 1) - %b = extractvalue { , i64 } %a, 0 - %c = extractvalue { , i64 } %a, 1 - store i64 %c, i64* %4 - - ret %b -} - -; Test with the VL output unused -define @intrinsic_vleff_dead_vl(* %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vleff_dead_vl: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vle64ff.v v8, (a0) -; CHECK-NEXT: ret -entry: - %a = call { , i64 } @llvm.riscv.vleff.nxv1f64( - undef, - * %0, - i64 %1) - %b = extractvalue { , i64 } %a, 0 - ret %b -} - -define @intrinsic_vleff_mask_dead_vl( %0, * %1, %2, i64 %3) nounwind { -; 
CHECK-LABEL: intrinsic_vleff_mask_dead_vl:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 1 x double>, i64 } %a, 0
-
-  ret <vscale x 1 x double> %b
-}
-
-; Test with the loaded value unused
-define void @intrinsic_vleff_dead_value(<vscale x 1 x double>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_dead_value:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.nxv1f64(
-    <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 1 x double>, i64 } %a, 1
-  store i64 %b, i64* %2
-  ret void
-}
-
-define void @intrinsic_vleff_mask_dead_value(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_dead_value:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 1 x double>, i64 } %a, 1
-  store i64 %b, i64* %4
-
-  ret void
-}
-
-; Test with both outputs dead. Make sure the vleff isn't deleted.
-define void @intrinsic_vleff_dead_all(<vscale x 1 x double>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vleff_dead_all:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.nxv1f64(
-    <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
-    i64 %1)
-  ret void
-}
-
-define void @intrinsic_vleff_mask_dead_all(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_dead_all:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret void
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff.ll b/llvm/test/CodeGen/RISCV/rvv/vleff.ll
new file mode 100644
index 0000000..5db8417
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff.ll
@@ -0,0 +1,2639 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64
+declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vleff_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT:    vle64ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vle64ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.nxv1i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
+  store
iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i64( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv1i64_nxv1i64( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV32-NEXT: vle64ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vle64ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv1i64( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv2i64( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv2i64_nxv2i64(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV32-NEXT: vle64ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vle64ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv2i64( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv2i64( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv2i64_nxv2i64( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV32-NEXT: vle64ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vle64ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv2i64( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv4i64( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv4i64_nxv4i64(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV32-NEXT: vle64ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vle64ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv4i64( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv4i64( + , + *, + , + iXLen, + iXLen); + +define 
@intrinsic_vleff_mask_v_nxv4i64_nxv4i64( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vle64ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vle64ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv4i64( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv8i64( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv8i64_nxv8i64(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32-NEXT: vle64ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vle64ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv8i64( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv8i64( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv8i64_nxv8i64( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vle64ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vle64ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv8i64( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv1f64( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv1f64_nxv1f64(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV32-NEXT: vle64ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vle64ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv1f64( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv1f64( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv1f64_nxv1f64( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: 
intrinsic_vleff_mask_v_nxv1f64_nxv1f64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV32-NEXT: vle64ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vle64ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv1f64( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv2f64( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv2f64_nxv2f64(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV32-NEXT: vle64ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vle64ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv2f64( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv2f64( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv2f64_nxv2f64( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV32-NEXT: vle64ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vle64ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv2f64( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv4f64( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv4f64_nxv4f64(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV32-NEXT: vle64ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vle64ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv4f64( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv4f64( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv4f64_nxv4f64( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vle64ff.v 
v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vle64ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv4f64( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv8f64( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv8f64_nxv8f64(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32-NEXT: vle64ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vle64ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv8f64( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv8f64( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv8f64_nxv8f64( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vle64ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vle64ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv8f64( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv1i32( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv1i32_nxv1i32(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; RV32-NEXT: vle32ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; RV64-NEXT: vle32ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv1i32( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i32( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv1i32_nxv1i32( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; RV32-NEXT: vle32ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: 
intrinsic_vleff_mask_v_nxv1i32_nxv1i32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; RV64-NEXT: vle32ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv1i32( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv2i32( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv2i32_nxv2i32(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; RV32-NEXT: vle32ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; RV64-NEXT: vle32ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv2i32( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv2i32( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv2i32_nxv2i32( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; RV32-NEXT: vle32ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; RV64-NEXT: vle32ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv2i32( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv4i32( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv4i32_nxv4i32(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; RV32-NEXT: vle32ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; RV64-NEXT: vle32ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv4i32( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv4i32( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv4i32_nxv4i32( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vle32ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vle32ff.v 
v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv4i32( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv8i32( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv8i32_nxv8i32(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; RV32-NEXT: vle32ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; RV64-NEXT: vle32ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv8i32( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv8i32( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv8i32_nxv8i32( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vle32ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vle32ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv8i32( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv16i32( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv16i32_nxv16i32(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vle32ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV64-NEXT: vle32ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv16i32( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv16i32( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv16i32_nxv16i32( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV32-NEXT: vle32ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV64-NEXT: vle32ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } 
@llvm.riscv.vleff.mask.nxv16i32( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv1f32( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv1f32_nxv1f32(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; RV32-NEXT: vle32ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; RV64-NEXT: vle32ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv1f32( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv1f32( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv1f32_nxv1f32( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; RV32-NEXT: vle32ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; RV64-NEXT: vle32ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv1f32( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv2f32( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv2f32_nxv2f32(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; RV32-NEXT: vle32ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; RV64-NEXT: vle32ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv2f32( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv2f32( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv2f32_nxv2f32( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; RV32-NEXT: vle32ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; RV64-NEXT: vle32ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv2f32( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , 
iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv4f32( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv4f32_nxv4f32(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; RV32-NEXT: vle32ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; RV64-NEXT: vle32ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv4f32( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv4f32( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv4f32_nxv4f32( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vle32ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vle32ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv4f32( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv8f32( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv8f32_nxv8f32(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; RV32-NEXT: vle32ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; RV64-NEXT: vle32ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv8f32( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv8f32( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv8f32_nxv8f32( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vle32ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vle32ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv8f32( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv16f32( + , + *, + iXLen); + +define 
@intrinsic_vleff_v_nxv16f32_nxv16f32(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vle32ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV64-NEXT: vle32ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv16f32( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv16f32( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv16f32_nxv16f32( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV32-NEXT: vle32ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV64-NEXT: vle32ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv16f32( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv1i16( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv1i16_nxv1i16(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; RV32-NEXT: vle16ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; RV64-NEXT: vle16ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv1i16( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i16( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv1i16_nxv1i16( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; RV32-NEXT: vle16ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; RV64-NEXT: vle16ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv1i16( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv2i16( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv2i16_nxv2i16(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: 
intrinsic_vleff_v_nxv2i16_nxv2i16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; RV32-NEXT: vle16ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; RV64-NEXT: vle16ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv2i16( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv2i16( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv2i16_nxv2i16( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; RV32-NEXT: vle16ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; RV64-NEXT: vle16ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv2i16( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv4i16( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv4i16_nxv4i16(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; RV32-NEXT: vle16ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; RV64-NEXT: vle16ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv4i16( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv4i16( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv4i16_nxv4i16( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vle16ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vle16ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv4i16( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv8i16( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv8i16_nxv8i16(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; RV32-NEXT: vle16ff.v 
v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; RV64-NEXT: vle16ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv8i16( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv8i16( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv8i16_nxv8i16( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vle16ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vle16ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv8i16( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv16i16( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv16i16_nxv16i16(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; RV32-NEXT: vle16ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; RV64-NEXT: vle16ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv16i16( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv16i16( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv16i16_nxv16i16( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; RV32-NEXT: vle16ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; RV64-NEXT: vle16ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv16i16( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv32i16( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv32i16_nxv32i16(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; RV32-NEXT: vle16ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: 
intrinsic_vleff_v_nxv32i16_nxv32i16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; RV64-NEXT: vle16ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv32i16( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv32i16( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv32i16_nxv32i16( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; RV32-NEXT: vle16ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; RV64-NEXT: vle16ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv32i16( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv1f16( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv1half_nxv1f16(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; RV32-NEXT: vle16ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; RV64-NEXT: vle16ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv1f16( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv1f16( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv1half_nxv1f16( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; RV32-NEXT: vle16ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; RV64-NEXT: vle16ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv1f16( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv2f16( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv2half_nxv2f16(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; RV32-NEXT: vle16ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; 
RV64-NEXT: vle16ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv2f16( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv2f16( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv2half_nxv2f16( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; RV32-NEXT: vle16ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; RV64-NEXT: vle16ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv2f16( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv4f16( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv4half_nxv4f16(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; RV32-NEXT: vle16ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; RV64-NEXT: vle16ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv4f16( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv4f16( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv4half_nxv4f16( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vle16ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vle16ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv4f16( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv8f16( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv8half_nxv8f16(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; RV32-NEXT: vle16ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; RV64-NEXT: vle16ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , 
iXLen } @llvm.riscv.vleff.nxv8f16( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv8f16( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv8half_nxv8f16( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vle16ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vle16ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv8f16( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv16f16( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv16half_nxv16f16(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; RV32-NEXT: vle16ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; RV64-NEXT: vle16ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv16f16( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv16f16( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv16half_nxv16f16( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; RV32-NEXT: vle16ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; RV64-NEXT: vle16ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv16f16( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv32f16( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv32half_nxv32f16(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; RV32-NEXT: vle16ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; RV64-NEXT: vle16ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv32f16( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + 
%c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv32f16( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv32half_nxv32f16( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; RV32-NEXT: vle16ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; RV64-NEXT: vle16ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv32f16( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv1i8( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv1i8_nxv1i8(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; RV32-NEXT: vle8ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; RV64-NEXT: vle8ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv1i8( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i8( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv1i8_nxv1i8( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; RV32-NEXT: vle8ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; RV64-NEXT: vle8ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv1i8( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv2i8( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv2i8_nxv2i8(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; RV32-NEXT: vle8ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; RV64-NEXT: vle8ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv2i8( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv2i8( + , + *, + , + iXLen, + 
iXLen); + +define @intrinsic_vleff_mask_v_nxv2i8_nxv2i8( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; RV32-NEXT: vle8ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; RV64-NEXT: vle8ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv2i8( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv4i8( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv4i8_nxv4i8(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; RV32-NEXT: vle8ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; RV64-NEXT: vle8ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv4i8( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv4i8( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv4i8_nxv4i8( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; RV32-NEXT: vle8ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; RV64-NEXT: vle8ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv4i8( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv8i8( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv8i8_nxv8i8(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; RV32-NEXT: vle8ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; RV64-NEXT: vle8ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv8i8( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv8i8( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv8i8_nxv8i8( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8: +; RV32: # %bb.0: # %entry 
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; RV32-NEXT: vle8ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; RV64-NEXT: vle8ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv8i8( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv16i8( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv16i8_nxv16i8(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; RV32-NEXT: vle8ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; RV64-NEXT: vle8ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv16i8( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv16i8( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv16i8_nxv16i8( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; RV32-NEXT: vle8ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; RV64-NEXT: vle8ff.v v8, (a0), v0.t +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.mask.nxv16i8( + %0, + * %1, + %2, + iXLen %3, iXLen 1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %4 + + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.nxv32i8( + , + *, + iXLen); + +define @intrinsic_vleff_v_nxv32i8_nxv32i8(* %0, iXLen %1, iXLen* %2) nounwind { +; RV32-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; RV32-NEXT: vle8ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; RV64-NEXT: vle8ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff.nxv32i8( + undef, + * %0, + iXLen %1) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %2 + ret %b +} + +declare { , iXLen } @llvm.riscv.vleff.mask.nxv32i8( + , + *, + , + iXLen, + iXLen); + +define @intrinsic_vleff_mask_v_nxv32i8_nxv32i8( %0, * %1, %2, iXLen %3, iXLen* %4) nounwind { +; RV32-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; RV32-NEXT: vle8ff.v v8, (a0), v0.t +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; 
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8>* %1,
+    <vscale x 32 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 32 x i8>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 32 x i8>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 32 x i8> %b
+}
+
+declare { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>*,
+  iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vleff_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT:    vle8ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vle8ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.nxv64i8(
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 64 x i8>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 64 x i8>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 64 x i8> %b
+}
+
+declare { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>*,
+  <vscale x 64 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vleff_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8>* %1,
+    <vscale x 64 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 64 x i8>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 64 x i8>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 64 x i8> %b
+}
+
+; Test with the VL output unused
+define <vscale x 1 x double> @intrinsic_vleff_dead_vl(<vscale x 1 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+; CHECK-LABEL: intrinsic_vleff_dead_vl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vle64ff.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
+    <vscale x 1 x double> undef,
+    <vscale x 1 x double>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
+  ret <vscale x 1 x double> %b
+}
+
+define <vscale x 1 x double> @intrinsic_vleff_mask_dead_vl(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_dead_vl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
+
+  ret <vscale x 1 x double> %b
+}
+
+; Test with the loaded value unused
+define void @intrinsic_vleff_dead_value(<vscale x 1 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_dead_value:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT:    vle64ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_dead_value:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vle64ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
+    <vscale x 1 x double> undef,
+    <vscale x 1 x double>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
+  store iXLen %b, iXLen* %2
+  ret void
+}
+
+define void @intrinsic_vleff_mask_dead_value(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_dead_value:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_dead_value:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
+  store iXLen %b, iXLen* %4
+
+  ret void
+}
+
+; Test with both outputs dead. Make sure the vleff isn't deleted.
+define void @intrinsic_vleff_dead_all(<vscale x 1 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+; CHECK-LABEL: intrinsic_vleff_dead_all:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vle64ff.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
+    <vscale x 1 x double> undef,
+    <vscale x 1 x double>* %0,
+    iXLen %1)
+  ret void
+}
+
+define void @intrinsic_vleff_mask_dead_all(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_dead_all:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
deleted file mode 100644
index 0b7dc24..0000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
+++ /dev/null
@@ -1,1772 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i32,
-  i32
-);
-
-define <vscale x 1 x i8> @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i8> @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i32,
-  i32
-);
-
-define <vscale x 2 x i8> @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8(
- , - , - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu -; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8( - %0, - %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv4i8.nxv4i8( - , - , - , - i32, - i32 -); - -define @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma -; CHECK-NEXT: vmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv4i8.nxv4i8( - %0, - %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8( - , - , - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu -; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv8i8.nxv8i8( - , - , - , - i32, - i32 -); - -define @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma -; CHECK-NEXT: vmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv8i8.nxv8i8( - %0, - %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8( - , - , - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu -; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv16i8.nxv16i8( - , - , - , - i32, - i32 -); - -define @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma -; CHECK-NEXT: vmacc.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv16i8.nxv16i8( - %0, - %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8( - , - , - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu -; CHECK-NEXT: vmacc.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv32i8.nxv32i8( - , - , - , - i32, - i32 -); - -define @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma -; CHECK-NEXT: 
vmacc.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv32i8.nxv32i8( - %0, - %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8( - , - , - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu -; CHECK-NEXT: vmacc.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv1i16.nxv1i16( - , - , - , - i32, - i32 -); - -define @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma -; CHECK-NEXT: vmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv1i16.nxv1i16( - %0, - %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16( - , - , - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv2i16.nxv2i16( - , - , - , - i32, - i32 -); - -define @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma -; CHECK-NEXT: vmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv2i16.nxv2i16( - %0, - %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16( - , - , - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv4i16.nxv4i16( - , - , - , - i32, - i32 -); - -define @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma -; CHECK-NEXT: vmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv4i16.nxv4i16( - %0, - %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16( - , - , - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv8i16.nxv8i16( - , - , - , - 
i32, - i32 -); - -define @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma -; CHECK-NEXT: vmacc.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv8i16.nxv8i16( - %0, - %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16( - , - , - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vmacc.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv16i16.nxv16i16( - , - , - , - i32, - i32 -); - -define @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma -; CHECK-NEXT: vmacc.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv16i16.nxv16i16( - %0, - %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16( - , - , - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vmacc.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv1i32.nxv1i32( - , - , - , - i32, - i32 -); - -define @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma -; CHECK-NEXT: vmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv1i32.nxv1i32( - %0, - %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32( - , - , - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv2i32.nxv2i32( - , - , - , - i32, - i32 -); - -define @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma -; CHECK-NEXT: vmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv2i32.nxv2i32( - %0, - %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32( - , - , - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv4i32.nxv4i32( - , - , - , - i32, - i32 -); - -define @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma -; CHECK-NEXT: vmacc.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv4i32.nxv4i32( - %0, - %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32( - , - , - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vmacc.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv8i32.nxv8i32( - , - , - , - i32, - i32 -); - -define @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma -; CHECK-NEXT: vmacc.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv8i32.nxv8i32( - %0, - %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32( - , - , - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vmacc.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv1i64.nxv1i64( - , - , - , - i32, - i32 -); - -define @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma -; CHECK-NEXT: vmacc.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv1i64.nxv1i64( - %0, - %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64( - , - , - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv2i64.nxv2i64( - , - , - , - i32, - i32 -); - -define @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma -; CHECK-NEXT: vmacc.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv2i64.nxv2i64( - %0, - %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare 
@llvm.riscv.vmacc.mask.nxv2i64.nxv2i64( - , - , - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vmacc.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv4i64.nxv4i64( - , - , - , - i32, - i32 -); - -define @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma -; CHECK-NEXT: vmacc.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv4i64.nxv4i64( - %0, - %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64( - , - , - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vmacc.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv1i8.i8( - , - i8, - , - i32, - i32 -); - -define @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma -; CHECK-NEXT: vmacc.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv1i8.i8( - %0, - i8 %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv1i8.i8( - , - i8, - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu -; CHECK-NEXT: vmacc.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv1i8.i8( - %0, - i8 %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv2i8.i8( - , - i8, - , - i32, - i32 -); - -define @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma -; CHECK-NEXT: vmacc.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv2i8.i8( - %0, - i8 %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv2i8.i8( - , - i8, - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu -; CHECK-NEXT: vmacc.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv2i8.i8( - %0, - i8 %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv4i8.i8( - , - i8, - , - i32, - i32 -); - -define @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma -; 
CHECK-NEXT: vmacc.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv4i8.i8( - %0, - i8 %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv4i8.i8( - , - i8, - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu -; CHECK-NEXT: vmacc.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv4i8.i8( - %0, - i8 %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv8i8.i8( - , - i8, - , - i32, - i32 -); - -define @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma -; CHECK-NEXT: vmacc.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv8i8.i8( - %0, - i8 %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv8i8.i8( - , - i8, - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu -; CHECK-NEXT: vmacc.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv8i8.i8( - %0, - i8 %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv16i8.i8( - , - i8, - , - i32, - i32 -); - -define @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma -; CHECK-NEXT: vmacc.vx v8, a0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv16i8.i8( - %0, - i8 %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv16i8.i8( - , - i8, - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu -; CHECK-NEXT: vmacc.vx v8, a0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv16i8.i8( - %0, - i8 %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv32i8.i8( - , - i8, - , - i32, - i32 -); - -define @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma -; CHECK-NEXT: vmacc.vx v8, a0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv32i8.i8( - %0, - i8 %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv32i8.i8( - , - i8, - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu -; CHECK-NEXT: vmacc.vx v8, a0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv32i8.i8( - %0, - i8 %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv1i16.i16( - , - i16, - , - i32, - i32 -); - -define @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { -; 
CHECK-LABEL: intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma -; CHECK-NEXT: vmacc.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv1i16.i16( - %0, - i16 %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv1i16.i16( - , - i16, - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vmacc.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv1i16.i16( - %0, - i16 %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv2i16.i16( - , - i16, - , - i32, - i32 -); - -define @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma -; CHECK-NEXT: vmacc.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv2i16.i16( - %0, - i16 %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv2i16.i16( - , - i16, - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vmacc.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv2i16.i16( - %0, - i16 %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv4i16.i16( - , - i16, - , - i32, - i32 -); - -define @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma -; CHECK-NEXT: vmacc.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv4i16.i16( - %0, - i16 %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv4i16.i16( - , - i16, - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vmacc.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv4i16.i16( - %0, - i16 %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv8i16.i16( - , - i16, - , - i32, - i32 -); - -define @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma -; CHECK-NEXT: vmacc.vx v8, a0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv8i16.i16( - %0, - i16 %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv8i16.i16( - , - i16, - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vmacc.vx v8, a0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv8i16.i16( - %0, - i16 %1, - 
%2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv16i16.i16( - , - i16, - , - i32, - i32 -); - -define @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma -; CHECK-NEXT: vmacc.vx v8, a0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv16i16.i16( - %0, - i16 %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv16i16.i16( - , - i16, - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vmacc.vx v8, a0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv16i16.i16( - %0, - i16 %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv1i32.i32( - , - i32, - , - i32, - i32 -); - -define @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma -; CHECK-NEXT: vmacc.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv1i32.i32( - %0, - i32 %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv1i32.i32( - , - i32, - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vmacc.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv1i32.i32( - %0, - i32 %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv2i32.i32( - , - i32, - , - i32, - i32 -); - -define @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma -; CHECK-NEXT: vmacc.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv2i32.i32( - %0, - i32 %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv2i32.i32( - , - i32, - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vmacc.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv2i32.i32( - %0, - i32 %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv4i32.i32( - , - i32, - , - i32, - i32 -); - -define @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma -; CHECK-NEXT: vmacc.vx v8, a0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv4i32.i32( - %0, - i32 %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv4i32.i32( - , - i32, - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vmacc.vx v8, a0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv4i32.i32( - %0, - i32 %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv8i32.i32( - , - i32, - , - i32, - i32 -); - -define @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma -; CHECK-NEXT: vmacc.vx v8, a0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv8i32.i32( - %0, - i32 %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv8i32.i32( - , - i32, - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vmacc.vx v8, a0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv8i32.i32( - %0, - i32 %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv1i64.i64( - , - i64, - , - i32, - i32 -); - -define @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma -; CHECK-NEXT: vmacc.vv v8, v10, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv1i64.i64( - %0, - i64 %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv1i64.i64( - , - i64, - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu -; CHECK-NEXT: vmacc.vv v8, v10, v9, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv1i64.i64( - %0, - i64 %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv2i64.i64( - , - i64, - , - i32, - i32 -); - -define @intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, ma -; CHECK-NEXT: vmacc.vv v8, v12, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv2i64.i64( - %0, - i64 %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv2i64.i64( - , - i64, - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu -; CHECK-NEXT: vmacc.vv v8, v12, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv2i64.i64( - %0, - i64 %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.nxv4i64.i64( - , - i64, - , - i32, - i32 -); - -define @intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, ma -; CHECK-NEXT: vmacc.vv v8, v16, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.nxv4i64.i64( - %0, - i64 %1, - %2, - i32 %3, i32 0) - - ret %a -} - -declare @llvm.riscv.vmacc.mask.nxv4i64.i64( - , - i64, - , - , - i32, i32); - -define @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu -; CHECK-NEXT: vmacc.vv v8, v16, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmacc.mask.nxv4i64.i64( - %0, - i64 %1, - %2, - %3, - i32 %4, i32 0) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc.ll similarity index 80% rename from llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll rename to llvm/test/CodeGen/RISCV/rvv/vmacc.ll index a41f679..d115199 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmacc.ll @@ -1,14 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vmacc.nxv1i8.nxv1i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma @@ -19,7 +21,7 @@ entry: %0, %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -29,9 +31,9 @@ declare @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( , , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu @@ -43,7 +45,7 @@ entry: %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -52,10 +54,10 @@ declare @llvm.riscv.vmacc.nxv2i8.nxv2i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma @@ -66,7 +68,7 @@ entry: %0, %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -76,9 +78,9 @@ declare @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8( , , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu @@ -90,7 +92,7 @@ entry: %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -99,10 +101,10 @@ declare @llvm.riscv.vmacc.nxv4i8.nxv4i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma @@ -113,7 +115,7 @@ entry: %0, %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -123,9 +125,9 @@ declare @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8( , , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu @@ -137,7 +139,7 @@ entry: %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -146,10 +148,10 @@ declare @llvm.riscv.vmacc.nxv8i8.nxv8i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma @@ -160,7 +162,7 @@ entry: %0, %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -170,9 +172,9 @@ declare @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8( , , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu @@ -184,7 +186,7 @@ entry: %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -193,10 +195,10 @@ declare @llvm.riscv.vmacc.nxv16i8.nxv16i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a0, e8, m2, tu, ma @@ -207,7 +209,7 @@ entry: %0, %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -217,9 +219,9 @@ declare @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8( , , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu @@ -231,7 +233,7 @@ entry: %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -240,10 +242,10 @@ declare @llvm.riscv.vmacc.nxv32i8.nxv32i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma @@ -254,7 +256,7 @@ entry: %0, %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -264,9 +266,9 @@ declare @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8( , , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu @@ -278,7 +280,7 @@ entry: %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -287,10 +289,10 @@ declare @llvm.riscv.vmacc.nxv1i16.nxv1i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma @@ -301,7 +303,7 @@ entry: %0, %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -311,9 +313,9 @@ declare @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16( , , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -325,7 +327,7 @@ entry: %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -334,10 +336,10 @@ declare @llvm.riscv.vmacc.nxv2i16.nxv2i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma @@ -348,7 +350,7 @@ entry: %0, %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -358,9 +360,9 @@ declare @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16( , , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a0, e16, mf2, tu, mu @@ -372,7 +374,7 @@ entry: %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -381,10 +383,10 @@ declare @llvm.riscv.vmacc.nxv4i16.nxv4i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma @@ -395,7 +397,7 @@ entry: %0, %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -405,9 +407,9 @@ declare @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16( , , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -419,7 +421,7 @@ entry: %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -428,10 +430,10 @@ declare @llvm.riscv.vmacc.nxv8i16.nxv8i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma @@ -442,7 +444,7 @@ entry: %0, %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -452,9 +454,9 @@ declare @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16( , , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -466,7 +468,7 @@ entry: %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -475,10 +477,10 @@ declare @llvm.riscv.vmacc.nxv16i16.nxv16i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma @@ -489,7 +491,7 @@ entry: %0, %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -499,9 +501,9 @@ declare @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16( , , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -513,7 +515,7 @@ entry: %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -522,10 +524,10 @@ declare @llvm.riscv.vmacc.nxv1i32.nxv1i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a0, e32, mf2, tu, ma @@ -536,7 +538,7 @@ entry: %0, %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -546,9 +548,9 @@ declare @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32( , , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -560,7 +562,7 @@ entry: %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -569,10 +571,10 @@ declare @llvm.riscv.vmacc.nxv2i32.nxv2i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma @@ -583,7 +585,7 @@ entry: %0, %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -593,9 +595,9 @@ declare @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32( , , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -607,7 +609,7 @@ entry: %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -616,10 +618,10 @@ declare @llvm.riscv.vmacc.nxv4i32.nxv4i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma @@ -630,7 +632,7 @@ entry: %0, %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -640,9 +642,9 @@ declare @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32( , , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -654,7 +656,7 @@ entry: %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -663,10 +665,10 @@ declare @llvm.riscv.vmacc.nxv8i32.nxv8i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma @@ -677,7 +679,7 @@ entry: %0, %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -687,9 +689,9 @@ declare @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32( , , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a0, e32, m4, tu, mu @@ -701,7 +703,7 @@ entry: %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -710,10 +712,10 @@ declare @llvm.riscv.vmacc.nxv1i64.nxv1i64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma @@ -724,7 +726,7 @@ entry: %0, %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -734,9 +736,9 @@ declare @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64( , , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -748,7 +750,7 @@ entry: %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -757,10 +759,10 @@ declare @llvm.riscv.vmacc.nxv2i64.nxv2i64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma @@ -771,7 +773,7 @@ entry: %0, %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -781,9 +783,9 @@ declare @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64( , , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -795,7 +797,7 @@ entry: %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -804,10 +806,10 @@ declare @llvm.riscv.vmacc.nxv4i64.nxv4i64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma @@ -818,7 +820,7 @@ entry: %0, %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -828,9 +830,9 @@ declare @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64( , , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -842,7 +844,7 @@ entry: %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -851,10 +853,10 @@ declare @llvm.riscv.vmacc.nxv1i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma @@ -865,7 +867,7 @@ 
entry: %0, i8 %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -875,9 +877,9 @@ declare @llvm.riscv.vmacc.mask.nxv1i8.i8( i8, , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu @@ -889,7 +891,7 @@ entry: i8 %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -898,10 +900,10 @@ declare @llvm.riscv.vmacc.nxv2i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma @@ -912,7 +914,7 @@ entry: %0, i8 %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -922,9 +924,9 @@ declare @llvm.riscv.vmacc.mask.nxv2i8.i8( i8, , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu @@ -936,7 +938,7 @@ entry: i8 %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -945,10 +947,10 @@ declare @llvm.riscv.vmacc.nxv4i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma @@ -959,7 +961,7 @@ entry: %0, i8 %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -969,9 +971,9 @@ declare @llvm.riscv.vmacc.mask.nxv4i8.i8( i8, , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu @@ -983,7 +985,7 @@ entry: i8 %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -992,10 +994,10 @@ declare @llvm.riscv.vmacc.nxv8i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma @@ -1006,7 +1008,7 @@ entry: %0, i8 %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -1016,9 +1018,9 @@ declare @llvm.riscv.vmacc.mask.nxv8i8.i8( i8, , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu @@ -1030,7 +1032,7 @@ entry: i8 %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -1039,10 +1041,10 @@ declare 
@llvm.riscv.vmacc.nxv16i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma @@ -1053,7 +1055,7 @@ entry: %0, i8 %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -1063,9 +1065,9 @@ declare @llvm.riscv.vmacc.mask.nxv16i8.i8( i8, , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu @@ -1077,7 +1079,7 @@ entry: i8 %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -1086,10 +1088,10 @@ declare @llvm.riscv.vmacc.nxv32i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma @@ -1100,7 +1102,7 @@ entry: %0, i8 %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -1110,9 +1112,9 @@ declare @llvm.riscv.vmacc.mask.nxv32i8.i8( i8, , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu @@ -1124,7 +1126,7 @@ entry: i8 %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -1133,10 +1135,10 @@ declare @llvm.riscv.vmacc.nxv1i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma @@ -1147,7 +1149,7 @@ entry: %0, i16 %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -1157,9 +1159,9 @@ declare @llvm.riscv.vmacc.mask.nxv1i16.i16( i16, , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu @@ -1171,7 +1173,7 @@ entry: i16 %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -1180,10 +1182,10 @@ declare @llvm.riscv.vmacc.nxv2i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma @@ -1194,7 +1196,7 @@ entry: %0, i16 %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -1204,9 +1206,9 @@ declare @llvm.riscv.vmacc.mask.nxv2i16.i16( 
i16, , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu @@ -1218,7 +1220,7 @@ entry: i16 %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -1227,10 +1229,10 @@ declare @llvm.riscv.vmacc.nxv4i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma @@ -1241,7 +1243,7 @@ entry: %0, i16 %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -1251,9 +1253,9 @@ declare @llvm.riscv.vmacc.mask.nxv4i16.i16( i16, , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu @@ -1265,7 +1267,7 @@ entry: i16 %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -1274,10 +1276,10 @@ declare @llvm.riscv.vmacc.nxv8i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma @@ -1288,7 +1290,7 @@ entry: %0, i16 %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -1298,9 +1300,9 @@ declare @llvm.riscv.vmacc.mask.nxv8i16.i16( i16, , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu @@ -1312,7 +1314,7 @@ entry: i16 %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -1321,10 +1323,10 @@ declare @llvm.riscv.vmacc.nxv16i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma @@ -1335,7 +1337,7 @@ entry: %0, i16 %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -1345,9 +1347,9 @@ declare @llvm.riscv.vmacc.mask.nxv16i16.i16( i16, , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu @@ -1359,7 +1361,7 @@ entry: i16 %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -1368,10 +1370,10 @@ declare 
@llvm.riscv.vmacc.nxv1i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma @@ -1382,7 +1384,7 @@ entry: %0, i32 %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -1392,9 +1394,9 @@ declare @llvm.riscv.vmacc.mask.nxv1i32.i32( i32, , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu @@ -1406,7 +1408,7 @@ entry: i32 %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -1415,10 +1417,10 @@ declare @llvm.riscv.vmacc.nxv2i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma @@ -1429,7 +1431,7 @@ entry: %0, i32 %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -1439,9 +1441,9 @@ declare @llvm.riscv.vmacc.mask.nxv2i32.i32( i32, , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu @@ -1453,7 +1455,7 @@ entry: i32 %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -1462,10 +1464,10 @@ declare @llvm.riscv.vmacc.nxv4i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma @@ -1476,7 +1478,7 @@ entry: %0, i32 %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -1486,9 +1488,9 @@ declare @llvm.riscv.vmacc.mask.nxv4i32.i32( i32, , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu @@ -1500,7 +1502,7 @@ entry: i32 %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -1509,10 +1511,10 @@ declare @llvm.riscv.vmacc.nxv8i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { +define @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma @@ -1523,7 +1525,7 @@ entry: %0, i32 %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -1533,9 +1535,9 @@ declare 
@llvm.riscv.vmacc.mask.nxv8i32.i32( i32, , , - i64, i64); + iXLen, iXLen); -define @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu @@ -1547,7 +1549,7 @@ entry: i32 %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -1556,21 +1558,34 @@ declare @llvm.riscv.vmacc.nxv1i64.i64( , i64, , - i64, - i64); - -define @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, ma -; CHECK-NEXT: vmacc.vx v8, a0, v9 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma +; RV32-NEXT: vmacc.vv v8, v10, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma +; RV64-NEXT: vmacc.vx v8, a0, v9 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmacc.nxv1i64.i64( %0, i64 %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -1580,21 +1595,34 @@ declare @llvm.riscv.vmacc.mask.nxv1i64.i64( i64, , , - i64, i64); - -define @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vmacc.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret + iXLen, iXLen); + +define @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vmacc.vv v8, v10, v9, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vmacc.vx v8, a0, v9, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmacc.mask.nxv1i64.i64( %0, i64 %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -1603,21 +1631,34 @@ declare @llvm.riscv.vmacc.nxv2i64.i64( , i64, , - i64, - i64); - -define @intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma -; CHECK-NEXT: vmacc.vx v8, a0, v10 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; 
RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, ma +; RV32-NEXT: vmacc.vv v8, v12, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma +; RV64-NEXT: vmacc.vx v8, a0, v10 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmacc.nxv2i64.i64( %0, i64 %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -1627,21 +1668,34 @@ declare @llvm.riscv.vmacc.mask.nxv2i64.i64( i64, , , - i64, i64); - -define @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vmacc.vx v8, a0, v10, v0.t -; CHECK-NEXT: ret + iXLen, iXLen); + +define @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; RV32-NEXT: vmacc.vv v8, v12, v10, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: vmacc.vx v8, a0, v10, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmacc.mask.nxv2i64.i64( %0, i64 %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } @@ -1650,21 +1704,34 @@ declare @llvm.riscv.vmacc.nxv4i64.i64( , i64, , - i64, - i64); - -define @intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, ma -; CHECK-NEXT: vmacc.vx v8, a0, v12 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, ma +; RV32-NEXT: vmacc.vv v8, v16, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma +; RV64-NEXT: vmacc.vx v8, a0, v12 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmacc.nxv4i64.i64( %0, i64 %1, %2, - i64 %3, i64 0) + iXLen %3, iXLen 0) ret %a } @@ -1674,21 +1741,34 @@ declare @llvm.riscv.vmacc.mask.nxv4i64.i64( i64, , , - i64, i64); - -define @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vmacc.vx v8, a0, v12, v0.t -; CHECK-NEXT: ret + iXLen, iXLen); + +define @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64: +; RV32: # %bb.0: # 
%entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; RV32-NEXT: vmacc.vv v8, v16, v12, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; RV64-NEXT: vmacc.vx v8, a0, v12, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmacc.mask.nxv4i64.i64( %0, i64 %1, %2, %3, - i64 %4, i64 0) + iXLen %4, iXLen 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll deleted file mode 100644 index 2aa8661..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll +++ /dev/null @@ -1,1236 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmadc.nxv1i8.nxv1i8( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv1i8.nxv1i8( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv2i8.nxv2i8( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv2i8.nxv2i8( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv4i8.nxv4i8( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv4i8.nxv4i8( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv8i8.nxv8i8( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv8i8.nxv8i8( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv16i8.nxv16i8( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv16i8.nxv16i8( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv32i8.nxv32i8( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv32i8.nxv32i8( - %0, - %1, - 
i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv64i8.nxv64i8( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv64i8.nxv64i8( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv1i16.nxv1i16( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv1i16.nxv1i16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv2i16.nxv2i16( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv2i16.nxv2i16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv4i16.nxv4i16( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv4i16.nxv4i16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv8i16.nxv8i16( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv8i16.nxv8i16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv16i16.nxv16i16( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv16i16.nxv16i16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv32i16.nxv32i16( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv32i16.nxv32i16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv1i32.nxv1i32( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv1i32.nxv1i32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv2i32.nxv2i32( - , - , - i32); - -define 
@intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv2i32.nxv2i32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv4i32.nxv4i32( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv4i32.nxv4i32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv8i32.nxv8i32( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv8i32.nxv8i32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv16i32.nxv16i32( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv16i32.nxv16i32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv1i64.nxv1i64( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv1i64.nxv1i64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv2i64.nxv2i64( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv2i64.nxv2i64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv4i64.nxv4i64( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv4i64.nxv4i64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv8i64.nxv8i64( - , - , - i32); - -define @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmadc.vv v0, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv8i64.nxv8i64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv1i8.i8( - , - i8, - i32); - -define @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8: -; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv1i8.i8( - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv2i8.i8( - , - i8, - i32); - -define @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv2i8.i8( - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv4i8.i8( - , - i8, - i32); - -define @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv4i8.i8( - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv8i8.i8( - , - i8, - i32); - -define @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv8i8.i8( - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv16i8.i8( - , - i8, - i32); - -define @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv16i8.i8( - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv32i8.i8( - , - i8, - i32); - -define @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv32i8.i8( - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv64i8.i8( - , - i8, - i32); - -define @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv64i8.i8( - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv1i16.i16( - , - i16, - i32); - -define @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv1i16.i16( - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv2i16.i16( - , - i16, - i32); - -define @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv2i16.i16( - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv4i16.i16( - , - i16, - i32); 
- -define @intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv4i16.i16( - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv8i16.i16( - , - i16, - i32); - -define @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv8i16.i16( - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv16i16.i16( - , - i16, - i32); - -define @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv16i16.i16( - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv32i16.i16( - , - i16, - i32); - -define @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv32i16.i16( - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv1i32.i32( - , - i32, - i32); - -define @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv1i32.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv2i32.i32( - , - i32, - i32); - -define @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv2i32.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv4i32.i32( - , - i32, - i32); - -define @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv4i32.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv8i32.i32( - , - i32, - i32); - -define @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv8i32.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv16i32.i32( - , - i32, - i32); - -define @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv16i32.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv1i64.i64( - , - i64, - i32); - -define @intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vmadc.vv v0, v8, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv1i64.i64( - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv2i64.i64( - , - i64, - i32); - -define @intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vmadc.vv v0, v8, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv2i64.i64( - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv4i64.i64( - , - i64, - i32); - -define @intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmadc.vv v0, v8, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv4i64.i64( - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmadc.nxv8i64.i64( - , - i64, - i32); - -define @intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vmadc.vv v0, v8, v16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv8i64.i64( - %0, - i64 %1, - i32 %2) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv1i8.i8( - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, -9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv2i8.i8( - %0, - i8 -9, - i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, 9 -; CHECK-NEXT: 
ret -entry: - %a = call @llvm.riscv.vmadc.nxv4i8.i8( - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, -9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv8i8.i8( - %0, - i8 -9, - i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv16i8.i8( - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, -9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv32i8.i8( - %0, - i8 -9, - i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv64i8.i8( - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, -9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv1i16.i16( - %0, - i16 -9, - i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv2i16.i16( - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, -9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv4i16.i16( - %0, - i16 -9, - i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv8i16.i16( - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, -9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv16i16.i16( - %0, - i16 -9, - i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv32i16.i16( - %0, - i16 9, - 
i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, -9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv1i32.i32( - %0, - i32 -9, - i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv2i32.i32( - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, -9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv4i32.i32( - %0, - i32 -9, - i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv8i32.i32( - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, -9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv16i32.i32( - %0, - i32 -9, - i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv1i64.i64( - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, -9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv2i64.i64( - %0, - i64 -9, - i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv4i64.i64( - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmadc.vi v0, v8, -9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.nxv8i64.i64( - %0, - i64 -9, - i32 %1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll deleted file mode 100644 index 76800ea..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll +++ /dev/null @@ -1,1388 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v 
-verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmadc.vvm v12, v8, v10, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmadc.vvm v16, v8, v12, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vmadc.vvm v24, v8, v16, v0 -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmadc.vvm v12, v8, v10, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmadc.vvm v16, v8, v12, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vmadc.vvm v24, v8, v16, v0 -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 -; 
CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmadc.vvm v12, v8, v10, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmadc.vvm v16, v8, v12, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vmadc.vvm v24, v8, v16, v0 -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmadc.vvm v12, v8, v10, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, 
ta, ma -; CHECK-NEXT: vmadc.vvm v16, v8, v12, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64( - , - , - , - i64); - -define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmadc.vvm v24, v8, v16, v0 -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv1i8.i8( - , - i8, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv1i8.i8( - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv2i8.i8( - , - i8, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv2i8.i8( - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv4i8.i8( - , - i8, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv4i8.i8( - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv8i8.i8( - , - i8, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv.v.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8( - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv16i8.i8( - , - i8, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vmadc.vxm v10, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv16i8.i8( - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv32i8.i8( - , - i8, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vmadc.vxm v12, v8, a0, v0 -; 
CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv32i8.i8( - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv64i8.i8( - , - i8, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vmadc.vxm v16, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv64i8.i8( - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv1i16.i16( - , - i16, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv1i16.i16( - %0, - i16 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv2i16.i16( - , - i16, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv2i16.i16( - %0, - i16 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv4i16.i16( - , - i16, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv.v.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16( - %0, - i16 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv8i16.i16( - , - i16, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vmadc.vxm v10, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv8i16.i16( - %0, - i16 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv16i16.i16( - , - i16, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vmadc.vxm v12, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv16i16.i16( - %0, - i16 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv32i16.i16( - , - i16, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vmadc.vxm v16, v8, a0, v0 -; 
CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv32i16.i16( - %0, - i16 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv1i32.i32( - , - i32, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv1i32.i32( - %0, - i32 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv2i32.i32( - , - i32, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv.v.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32( - %0, - i32 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv4i32.i32( - , - i32, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vmadc.vxm v10, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv4i32.i32( - %0, - i32 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv8i32.i32( - , - i32, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vmadc.vxm v12, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv8i32.i32( - %0, - i32 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv16i32.i32( - , - i32, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vmadc.vxm v16, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv16i32.i32( - %0, - i32 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv1i64.i64( - , - i64, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv.v.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv1i64.i64( - %0, - i64 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv2i64.i64( - , - i64, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vmadc.vxm v10, v8, a0, v0 
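The scalable-vector types in the deleted checks above and below were lost when this extract was flattened, but each test's shape can still be read off its mangled name. Reconstructed from that mangling (an assumption, not text copied from the patch), the nxv2i64 vxm case whose CHECK lines surround this point would read roughly as the sketch below; on riscv64 the i64 scalar addend fits in a GPR, so vmadc.vxm consumes it from a0 directly, matching the adjacent CHECK-NEXT lines.

declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
entry:
  ; Per element, vmadc.vxm produces the carry-out mask of %0[i] + %1 + carry-in
  ; taken from the mask operand %2; %3 is the VL operand (i64 in this rv64-only file).
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    <vscale x 2 x i1> %2,
    i64 %3)
  ret <vscale x 2 x i1> %a
}

The merged vmadc.carry.in.ll later in this patch keeps the same body but spells the VL operand iXLen.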
-; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv2i64.i64( - %0, - i64 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv4i64.i64( - , - i64, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vmadc.vxm v12, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv4i64.i64( - %0, - i64 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmadc.carry.in.nxv8i64.i64( - , - i64, - , - i64); - -define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vmadc.vxm v16, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv8i64.i64( - %0, - i64 %1, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv1i8.i8( - %0, - i8 9, - %1, - i64 %2) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv2i8.i8( - %0, - i8 9, - %1, - i64 %2) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv4i8.i8( - %0, - i8 9, - %1, - i64 %2) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 -; CHECK-NEXT: vmv.v.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8( - %0, - i8 9, - %1, - i64 %2) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmadc.vim v10, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv16i8.i8( - %0, - i8 9, - %1, - i64 %2) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmadc.vim v12, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv32i8.i8( - %0, - i8 9, - %1, - i64 %2) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vmadc.vim v16, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv64i8.i8( - %0, - i8 9, - %1, - i64 %2) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv1i16.i16( - %0, - i16 9, - %1, - i64 %2) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv2i16.i16( - %0, - i16 9, - %1, - i64 %2) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 -; CHECK-NEXT: vmv.v.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16( - %0, - i16 9, - %1, - i64 %2) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmadc.vim v10, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv8i16.i16( - %0, - i16 9, - %1, - i64 %2) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmadc.vim v12, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv16i16.i16( - %0, - i16 9, - %1, - i64 %2) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vmadc.vim v16, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv32i16.i16( - %0, - i16 9, - %1, - i64 %2) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv1i32.i32( - %0, - i32 9, - %1, - i64 %2) - - ret %a -} - -define 
@intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 -; CHECK-NEXT: vmv.v.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32( - %0, - i32 9, - %1, - i64 %2) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmadc.vim v10, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv4i32.i32( - %0, - i32 9, - %1, - i64 %2) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmadc.vim v12, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv8i32.i32( - %0, - i32 9, - %1, - i64 %2) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vmadc.vim v16, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv16i32.i32( - %0, - i32 9, - %1, - i64 %2) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 -; CHECK-NEXT: vmv.v.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv1i64.i64( - %0, - i64 9, - %1, - i64 %2) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmadc.vim v10, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv2i64.i64( - %0, - i64 9, - %1, - i64 %2) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmadc.vim v12, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv4i64.i64( - %0, - i64 9, - %1, - i64 %2) - - ret %a -} - -define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmadc.vim v16, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadc.carry.in.nxv8i64.i64( - %0, - i64 9, - %1, - i64 %2) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in.ll similarity index 83% rename from 
llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in.ll index b939b11..c24a9ba 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -19,7 +21,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -42,7 +44,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -65,7 +67,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -88,7 +90,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -111,7 +113,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, 
ma @@ -134,7 +136,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -157,7 +159,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -180,7 +182,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -203,7 +205,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -226,7 +228,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -249,7 +251,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -272,7 +274,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -295,7 +297,7 @@ entry: %0, %1, %2, - i32 
%3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -318,7 +320,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -341,7 +343,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -364,7 +366,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -387,7 +389,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -410,7 +412,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -433,7 +435,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -456,7 +458,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -465,9 +467,9 @@ declare 
@llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -479,7 +481,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64( , , , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -502,7 +504,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv1i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -525,7 +527,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv2i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -548,7 +550,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv4i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -571,7 +573,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv8i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -594,7 +596,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv16i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -617,7 +619,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv32i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8( %0, i8 %1, 
%2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -640,7 +642,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -649,9 +651,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv64i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma @@ -663,7 +665,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv1i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -686,7 +688,7 @@ entry: %0, i16 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv2i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -709,7 +711,7 @@ entry: %0, i16 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv4i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -732,7 +734,7 @@ entry: %0, i16 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv8i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -755,7 +757,7 @@ entry: %0, i16 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv16i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -778,7 +780,7 @@ entry: %0, i16 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv32i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +define 
@intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma @@ -801,7 +803,7 @@ entry: %0, i16 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv1i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -824,7 +826,7 @@ entry: %0, i32 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -833,9 +835,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv2i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -847,7 +849,7 @@ entry: %0, i32 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -856,9 +858,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv4i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -870,7 +872,7 @@ entry: %0, i32 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -879,9 +881,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv8i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -893,7 +895,7 @@ entry: %0, i32 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -902,9 +904,9 @@ declare @llvm.riscv.vmadc.carry.in.nxv16i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma @@ -916,7 +918,7 @@ entry: %0, i32 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -925,27 +927,34 @@ declare @llvm.riscv.vmadc.carry.in.nxv1i64.i64( , i64, , - i32); - -define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vmadc.vvm v9, v8, v10, v0 -; CHECK-NEXT: vmv.v.v v0, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: 
intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vmadc.vvm v9, v8, v10, v0 +; RV32-NEXT: vmv.v.v v0, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmadc.vxm v9, v8, a0, v0 +; RV64-NEXT: vmv.v.v v0, v9 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv1i64.i64( %0, i64 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -954,27 +963,34 @@ declare @llvm.riscv.vmadc.carry.in.nxv2i64.i64( , i64, , - i32); - -define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmadc.vvm v10, v8, v12, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmadc.vvm v10, v8, v12, v0 +; RV32-NEXT: vmv1r.v v0, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmadc.vxm v10, v8, a0, v0 +; RV64-NEXT: vmv1r.v v0, v10 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv2i64.i64( %0, i64 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -983,27 +999,34 @@ declare @llvm.riscv.vmadc.carry.in.nxv4i64.i64( , i64, , - i32); - -define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vmadc.vvm v12, v8, v16, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmadc.vvm v12, v8, v16, v0 +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmadc.vxm v12, v8, a0, v0 +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: ret entry: %a = 
call @llvm.riscv.vmadc.carry.in.nxv4i64.i64( %0, i64 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1012,32 +1035,39 @@ declare @llvm.riscv.vmadc.carry.in.nxv8i64.i64( , i64, , - i32); - -define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vmadc.vvm v16, v8, v24, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vmadc.vvm v16, v8, v24, v0 +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmadc.vxm v16, v8, a0, v0 +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv8i64.i64( %0, i64 %1, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -1049,12 +1079,12 @@ entry: %0, i8 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -1066,12 +1096,12 @@ entry: %0, i8 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -1083,12 +1113,12 @@ entry: %0, i8 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -1100,12 +1130,12 @@ entry: %0, i8 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -1117,12 +1147,12 @@ entry: %0, i8 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8( %0, %1, i32 %2) nounwind { +define 
@intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -1134,12 +1164,12 @@ entry: %0, i8 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -1151,12 +1181,12 @@ entry: %0, i8 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -1168,12 +1198,12 @@ entry: %0, i16 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -1185,12 +1215,12 @@ entry: %0, i16 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -1202,12 +1232,12 @@ entry: %0, i16 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -1219,12 +1249,12 @@ entry: %0, i16 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -1236,12 +1266,12 @@ entry: %0, i16 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -1253,12 +1283,12 @@ entry: %0, i16 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -1270,12 +1300,12 @@ entry: %0, i32 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32( 
%0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -1287,12 +1317,12 @@ entry: %0, i32 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -1304,12 +1334,12 @@ entry: %0, i32 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -1321,12 +1351,12 @@ entry: %0, i32 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -1338,12 +1368,12 @@ entry: %0, i32 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -1355,12 +1385,12 @@ entry: %0, i64 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -1372,12 +1402,12 @@ entry: %0, i64 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -1389,12 +1419,12 @@ entry: %0, i64 9, %1, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -1406,7 +1436,7 @@ entry: %0, i64 9, %1, - i32 %2) + iXLen %2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.ll similarity index 79% rename from llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll rename to llvm/test/CodeGen/RISCV/rvv/vmadc.ll index a13cc8a..bfa4ce3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ 
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vmadc.nxv1i8.nxv1i8( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv1i8.nxv1i8( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -24,9 +26,9 @@ entry: declare @llvm.riscv.vmadc.nxv2i8.nxv2i8( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -36,7 +38,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv2i8.nxv2i8( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -44,9 +46,9 @@ entry: declare @llvm.riscv.vmadc.nxv4i8.nxv4i8( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -56,7 +58,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv4i8.nxv4i8( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -64,9 +66,9 @@ entry: declare @llvm.riscv.vmadc.nxv8i8.nxv8i8( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -76,7 +78,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv8i8.nxv8i8( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -84,9 +86,9 @@ entry: declare @llvm.riscv.vmadc.nxv16i8.nxv16i8( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -96,7 +98,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv16i8.nxv16i8( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -104,9 +106,9 @@ entry: declare @llvm.riscv.vmadc.nxv32i8.nxv32i8( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -116,7 +118,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv32i8.nxv32i8( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -124,9 +126,9 @@ entry: declare @llvm.riscv.vmadc.nxv64i8.nxv64i8( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -136,7 +138,7 @@ entry: %a = call 
@llvm.riscv.vmadc.nxv64i8.nxv64i8( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -144,9 +146,9 @@ entry: declare @llvm.riscv.vmadc.nxv1i16.nxv1i16( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -156,7 +158,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv1i16.nxv1i16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -164,9 +166,9 @@ entry: declare @llvm.riscv.vmadc.nxv2i16.nxv2i16( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -176,7 +178,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv2i16.nxv2i16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vmadc.nxv4i16.nxv4i16( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv4i16.nxv4i16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -204,9 +206,9 @@ entry: declare @llvm.riscv.vmadc.nxv8i16.nxv8i16( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -216,7 +218,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv8i16.nxv8i16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -224,9 +226,9 @@ entry: declare @llvm.riscv.vmadc.nxv16i16.nxv16i16( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -236,7 +238,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv16i16.nxv16i16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -244,9 +246,9 @@ entry: declare @llvm.riscv.vmadc.nxv32i16.nxv32i16( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -256,7 +258,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv32i16.nxv32i16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -264,9 +266,9 @@ entry: declare @llvm.riscv.vmadc.nxv1i32.nxv1i32( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -276,7 +278,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv1i32.nxv1i32( %0, %1, - i64 %2) 
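The hunks running through here all make the same mechanical substitution: the fixed i64 VL operand of the riscv64 variant becomes the iXLen placeholder, so one copy of each test serves both targets. iXLen is not an LLVM type; the RUN lines rewrite it with sed to i32 or i64 before the IR reaches llc, and the CHECK/RV32/RV64 FileCheck prefixes split expectations only where codegen actually differs. A minimal, self-contained sketch of the pattern (a hypothetical reduced test, not a file from this patch; CHECK bodies omitted since utils/update_llc_test_checks.py would regenerate them):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

; One body covers both targets: by the time llc parses this, sed has already
; turned iXLen into the native XLEN integer type for the chosen triple.
define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)
  ret <vscale x 1 x i1> %a
}

Only the cases that genuinely diverge, such as the i64-scalar vxm tests earlier in this file where riscv32 must splat the scalar through a stack slot with vlse64.v and use vmadc.vvm, get separate RV32/RV64 check blocks.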
+ iXLen %2) ret %a } @@ -284,9 +286,9 @@ entry: declare @llvm.riscv.vmadc.nxv2i32.nxv2i32( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -296,7 +298,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv2i32.nxv2i32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -304,9 +306,9 @@ entry: declare @llvm.riscv.vmadc.nxv4i32.nxv4i32( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -316,7 +318,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv4i32.nxv4i32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -324,9 +326,9 @@ entry: declare @llvm.riscv.vmadc.nxv8i32.nxv8i32( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -336,7 +338,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv8i32.nxv8i32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -344,9 +346,9 @@ entry: declare @llvm.riscv.vmadc.nxv16i32.nxv16i32( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -356,7 +358,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv16i32.nxv16i32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -364,9 +366,9 @@ entry: declare @llvm.riscv.vmadc.nxv1i64.nxv1i64( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -376,7 +378,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv1i64.nxv1i64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -384,9 +386,9 @@ entry: declare @llvm.riscv.vmadc.nxv2i64.nxv2i64( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -396,7 +398,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv2i64.nxv2i64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -404,9 +406,9 @@ entry: declare @llvm.riscv.vmadc.nxv4i64.nxv4i64( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -416,7 +418,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv4i64.nxv4i64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -424,9 +426,9 @@ entry: declare 
@llvm.riscv.vmadc.nxv8i64.nxv8i64( , , - i64); + iXLen); -define @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -436,7 +438,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv8i64.nxv8i64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -444,9 +446,9 @@ entry: declare @llvm.riscv.vmadc.nxv1i8.i8( , i8, - i64); + iXLen); -define @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -456,7 +458,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv1i8.i8( %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -464,9 +466,9 @@ entry: declare @llvm.riscv.vmadc.nxv2i8.i8( , i8, - i64); + iXLen); -define @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -476,7 +478,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv2i8.i8( %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -484,9 +486,9 @@ entry: declare @llvm.riscv.vmadc.nxv4i8.i8( , i8, - i64); + iXLen); -define @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -496,7 +498,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv4i8.i8( %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -504,9 +506,9 @@ entry: declare @llvm.riscv.vmadc.nxv8i8.i8( , i8, - i64); + iXLen); -define @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -516,7 +518,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv8i8.i8( %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -524,9 +526,9 @@ entry: declare @llvm.riscv.vmadc.nxv16i8.i8( , i8, - i64); + iXLen); -define @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -536,7 +538,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv16i8.i8( %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -544,9 +546,9 @@ entry: declare @llvm.riscv.vmadc.nxv32i8.i8( , i8, - i64); + iXLen); -define @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -556,7 +558,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv32i8.i8( %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -564,9 +566,9 @@ entry: declare @llvm.riscv.vmadc.nxv64i8.i8( , i8, - i64); + iXLen); -define @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8( %0, 
i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma @@ -576,7 +578,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv64i8.i8( %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -584,9 +586,9 @@ entry: declare @llvm.riscv.vmadc.nxv1i16.i16( , i16, - i64); + iXLen); -define @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -596,7 +598,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv1i16.i16( %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -604,9 +606,9 @@ entry: declare @llvm.riscv.vmadc.nxv2i16.i16( , i16, - i64); + iXLen); -define @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -616,7 +618,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv2i16.i16( %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -624,9 +626,9 @@ entry: declare @llvm.riscv.vmadc.nxv4i16.i16( , i16, - i64); + iXLen); -define @intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -636,7 +638,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv4i16.i16( %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -644,9 +646,9 @@ entry: declare @llvm.riscv.vmadc.nxv8i16.i16( , i16, - i64); + iXLen); -define @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -656,7 +658,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv8i16.i16( %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -664,9 +666,9 @@ entry: declare @llvm.riscv.vmadc.nxv16i16.i16( , i16, - i64); + iXLen); -define @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -676,7 +678,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv16i16.i16( %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -684,9 +686,9 @@ entry: declare @llvm.riscv.vmadc.nxv32i16.i16( , i16, - i64); + iXLen); -define @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma @@ -696,7 +698,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv32i16.i16( %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -704,9 +706,9 @@ entry: declare @llvm.riscv.vmadc.nxv1i32.i32( , i32, - i64); + iXLen); -define @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -716,7 +718,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv1i32.i32( %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -724,9 +726,9 @@ entry: declare @llvm.riscv.vmadc.nxv2i32.i32( , i32, - i64); + iXLen); -define @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -736,7 +738,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv2i32.i32( %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -744,9 +746,9 @@ entry: declare @llvm.riscv.vmadc.nxv4i32.i32( , i32, - i64); + iXLen); -define @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -756,7 +758,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv4i32.i32( %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -764,9 +766,9 @@ entry: declare @llvm.riscv.vmadc.nxv8i32.i32( , i32, - i64); + iXLen); -define @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -776,7 +778,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv8i32.i32( %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -784,9 +786,9 @@ entry: declare @llvm.riscv.vmadc.nxv16i32.i32( , i32, - i64); + iXLen); -define @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma @@ -796,7 +798,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv16i32.i32( %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -804,19 +806,31 @@ entry: declare @llvm.riscv.vmadc.nxv1i64.i64( , i64, - i64); - -define @intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vmadc.vv v0, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmadc.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmadc.nxv1i64.i64( %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -824,19 +838,31 @@ entry: declare @llvm.riscv.vmadc.nxv2i64.i64( , i64, - i64); - -define @intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; 
CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vmadc.vv v0, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmadc.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmadc.nxv2i64.i64( %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -844,19 +870,31 @@ entry: declare @llvm.riscv.vmadc.nxv4i64.i64( , i64, - i64); - -define @intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmadc.vv v0, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmadc.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmadc.nxv4i64.i64( %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -864,24 +902,36 @@ entry: declare @llvm.riscv.vmadc.nxv8i64.i64( , i64, - i64); - -define @intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vmadc.vx v0, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmadc.vv v0, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmadc.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmadc.nxv8i64.i64( %0, i64 %1, - i64 %2) + iXLen %2) ret %a } -define @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -891,12 +941,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv1i8.i8( %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, 
ta, ma @@ -906,12 +956,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv2i8.i8( %0, i8 -9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -921,12 +971,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv4i8.i8( %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -936,12 +986,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv8i8.i8( %0, i8 -9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -951,12 +1001,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv16i8.i8( %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -966,12 +1016,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv32i8.i8( %0, i8 -9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -981,12 +1031,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv64i8.i8( %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -996,12 +1046,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv1i16.i16( %0, i16 -9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -1011,12 +1061,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv2i16.i16( %0, i16 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -1026,12 +1076,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv4i16.i16( %0, i16 -9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -1041,12 +1091,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv8i16.i16( %0, i16 9, - i64 %1) + 
iXLen %1) ret %a } -define @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -1056,12 +1106,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv16i16.i16( %0, i16 -9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -1071,12 +1121,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv32i16.i16( %0, i16 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -1086,12 +1136,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv1i32.i32( %0, i32 -9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -1101,12 +1151,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv2i32.i32( %0, i32 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -1116,12 +1166,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv4i32.i32( %0, i32 -9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -1131,12 +1181,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv8i32.i32( %0, i32 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -1146,12 +1196,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv16i32.i32( %0, i32 -9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -1161,12 +1211,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv1i64.i64( %0, i64 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -1176,12 +1226,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv2i64.i64( %0, i64 -9, - i64 %1) + iXLen %1) ret %a } -define 
@intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -1191,12 +1241,12 @@ entry: %a = call @llvm.riscv.vmadc.nxv4i64.i64( %0, i64 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64( %0, i64 %1) nounwind { +define @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -1206,7 +1256,7 @@ entry: %a = call @llvm.riscv.vmadc.nxv8i64.i64( %0, i64 -9, - i64 %1) + iXLen %1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll deleted file mode 100644 index d672e27..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll +++ /dev/null @@ -1,1694 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmadd.nxv1i8.nxv1i8( - , - , - , - i64, - i64); - -define @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma -; CHECK-NEXT: vmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv1i8.nxv1i8( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8( - , - , - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu -; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv2i8.nxv2i8( - , - , - , - i64, - i64); - -define @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma -; CHECK-NEXT: vmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv2i8.nxv2i8( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8( - , - , - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu -; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv4i8.nxv4i8( - , - , - , - i64, - i64); - -define @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma -; CHECK-NEXT: vmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv4i8.nxv4i8( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8( - , - , - , - , - i64, i64); - -define 
@intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu -; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv8i8.nxv8i8( - , - , - , - i64, - i64); - -define @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma -; CHECK-NEXT: vmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv8i8.nxv8i8( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8( - , - , - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu -; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv16i8.nxv16i8( - , - , - , - i64, - i64); - -define @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma -; CHECK-NEXT: vmadd.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv16i8.nxv16i8( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8( - , - , - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu -; CHECK-NEXT: vmadd.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv32i8.nxv32i8( - , - , - , - i64, - i64); - -define @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma -; CHECK-NEXT: vmadd.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv32i8.nxv32i8( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8( - , - , - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu -; CHECK-NEXT: vmadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv1i16.nxv1i16( - , - , - , - i64, - i64); - -define @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma -; CHECK-NEXT: vmadd.vv v8, v9, v10 -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv1i16.nxv1i16( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16( - , - , - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv2i16.nxv2i16( - , - , - , - i64, - i64); - -define @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma -; CHECK-NEXT: vmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv2i16.nxv2i16( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16( - , - , - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv4i16.nxv4i16( - , - , - , - i64, - i64); - -define @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma -; CHECK-NEXT: vmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv4i16.nxv4i16( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16( - , - , - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv8i16.nxv8i16( - , - , - , - i64, - i64); - -define @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma -; CHECK-NEXT: vmadd.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv8i16.nxv8i16( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16( - , - , - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vmadd.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv16i16.nxv16i16( - , - , - , - i64, - i64); - -define 
@intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma -; CHECK-NEXT: vmadd.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv16i16.nxv16i16( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16( - , - , - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vmadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv1i32.nxv1i32( - , - , - , - i64, - i64); - -define @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma -; CHECK-NEXT: vmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv1i32.nxv1i32( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32( - , - , - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv2i32.nxv2i32( - , - , - , - i64, - i64); - -define @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma -; CHECK-NEXT: vmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv2i32.nxv2i32( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32( - , - , - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv4i32.nxv4i32( - , - , - , - i64, - i64); - -define @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma -; CHECK-NEXT: vmadd.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv4i32.nxv4i32( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32( - , - , - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, 
mu -; CHECK-NEXT: vmadd.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv8i32.nxv8i32( - , - , - , - i64, - i64); - -define @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma -; CHECK-NEXT: vmadd.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv8i32.nxv8i32( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32( - , - , - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vmadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv1i64.nxv1i64( - , - , - , - i64, - i64); - -define @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma -; CHECK-NEXT: vmadd.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv1i64.nxv1i64( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64( - , - , - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv2i64.nxv2i64( - , - , - , - i64, - i64); - -define @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma -; CHECK-NEXT: vmadd.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv2i64.nxv2i64( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64( - , - , - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vmadd.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv4i64.nxv4i64( - , - , - , - i64, - i64); - -define @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma -; CHECK-NEXT: vmadd.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv4i64.nxv4i64( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64( - , - , - , - , - 
i64, i64); - -define @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vmadd.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv1i8.i8( - , - i8, - , - i64, - i64); - -define @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma -; CHECK-NEXT: vmadd.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv1i8.i8( - %0, - i8 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv1i8.i8( - , - i8, - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu -; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv1i8.i8( - %0, - i8 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv2i8.i8( - , - i8, - , - i64, - i64); - -define @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma -; CHECK-NEXT: vmadd.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv2i8.i8( - %0, - i8 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv2i8.i8( - , - i8, - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu -; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv2i8.i8( - %0, - i8 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv4i8.i8( - , - i8, - , - i64, - i64); - -define @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma -; CHECK-NEXT: vmadd.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv4i8.i8( - %0, - i8 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv4i8.i8( - , - i8, - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu -; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv4i8.i8( - %0, - i8 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv8i8.i8( - , - i8, - , - i64, - i64); - -define @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma -; CHECK-NEXT: vmadd.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv8i8.i8( - %0, - i8 %1, - 
%2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv8i8.i8( - , - i8, - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu -; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv8i8.i8( - %0, - i8 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv16i8.i8( - , - i8, - , - i64, - i64); - -define @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma -; CHECK-NEXT: vmadd.vx v8, a0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv16i8.i8( - %0, - i8 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv16i8.i8( - , - i8, - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu -; CHECK-NEXT: vmadd.vx v8, a0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv16i8.i8( - %0, - i8 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv32i8.i8( - , - i8, - , - i64, - i64); - -define @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma -; CHECK-NEXT: vmadd.vx v8, a0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv32i8.i8( - %0, - i8 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv32i8.i8( - , - i8, - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu -; CHECK-NEXT: vmadd.vx v8, a0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv32i8.i8( - %0, - i8 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv1i16.i16( - , - i16, - , - i64, - i64); - -define @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma -; CHECK-NEXT: vmadd.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv1i16.i16( - %0, - i16 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv1i16.i16( - , - i16, - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv1i16.i16( - %0, - i16 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv2i16.i16( - , - i16, - , - i64, - i64); - -define @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a1, e16, mf2, tu, ma -; CHECK-NEXT: vmadd.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv2i16.i16( - %0, - i16 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv2i16.i16( - , - i16, - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv2i16.i16( - %0, - i16 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv4i16.i16( - , - i16, - , - i64, - i64); - -define @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma -; CHECK-NEXT: vmadd.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv4i16.i16( - %0, - i16 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv4i16.i16( - , - i16, - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv4i16.i16( - %0, - i16 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv8i16.i16( - , - i16, - , - i64, - i64); - -define @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma -; CHECK-NEXT: vmadd.vx v8, a0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv8i16.i16( - %0, - i16 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv8i16.i16( - , - i16, - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vmadd.vx v8, a0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv8i16.i16( - %0, - i16 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv16i16.i16( - , - i16, - , - i64, - i64); - -define @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma -; CHECK-NEXT: vmadd.vx v8, a0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv16i16.i16( - %0, - i16 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv16i16.i16( - , - i16, - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vmadd.vx v8, a0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv16i16.i16( - %0, - i16 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv1i32.i32( - , - i32, - , 
- i64, - i64); - -define @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma -; CHECK-NEXT: vmadd.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv1i32.i32( - %0, - i32 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv1i32.i32( - , - i32, - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv1i32.i32( - %0, - i32 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv2i32.i32( - , - i32, - , - i64, - i64); - -define @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma -; CHECK-NEXT: vmadd.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv2i32.i32( - %0, - i32 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv2i32.i32( - , - i32, - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv2i32.i32( - %0, - i32 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv4i32.i32( - , - i32, - , - i64, - i64); - -define @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma -; CHECK-NEXT: vmadd.vx v8, a0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv4i32.i32( - %0, - i32 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv4i32.i32( - , - i32, - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vmadd.vx v8, a0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv4i32.i32( - %0, - i32 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv8i32.i32( - , - i32, - , - i64, - i64); - -define @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma -; CHECK-NEXT: vmadd.vx v8, a0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv8i32.i32( - %0, - i32 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv8i32.i32( - , - i32, - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vmadd.vx v8, a0, v12, 
v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv8i32.i32( - %0, - i32 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv1i64.i64( - , - i64, - , - i64, - i64); - -define @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, ma -; CHECK-NEXT: vmadd.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv1i64.i64( - %0, - i64 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv1i64.i64( - , - i64, - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv1i64.i64( - %0, - i64 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv2i64.i64( - , - i64, - , - i64, - i64); - -define @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma -; CHECK-NEXT: vmadd.vx v8, a0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv2i64.i64( - %0, - i64 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv2i64.i64( - , - i64, - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vmadd.vx v8, a0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv2i64.i64( - %0, - i64 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.nxv4i64.i64( - , - i64, - , - i64, - i64); - -define @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, ma -; CHECK-NEXT: vmadd.vx v8, a0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.nxv4i64.i64( - %0, - i64 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vmadd.mask.nxv4i64.i64( - , - i64, - , - , - i64, i64); - -define @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vmadd.vx v8, a0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmadd.mask.nxv4i64.i64( - %0, - i64 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd.ll similarity index 80% rename from llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmadd.ll index 547b422..8eed060 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadd.ll @@ -1,14 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: 
-verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vmadd.nxv1i8.nxv1i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma @@ -19,7 +21,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -29,9 +31,9 @@ declare @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu @@ -43,7 +45,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -52,10 +54,10 @@ declare @llvm.riscv.vmadd.nxv2i8.nxv2i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma @@ -66,7 +68,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -76,9 +78,9 @@ declare @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu @@ -90,7 +92,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -99,10 +101,10 @@ declare @llvm.riscv.vmadd.nxv4i8.nxv4i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma @@ -113,7 +115,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -123,9 +125,9 @@ declare @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu @@ -137,7 +139,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -146,10 +148,10 @@ declare @llvm.riscv.vmadd.nxv8i8.nxv8i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma @@ -160,7 +162,7 @@ 
entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -170,9 +172,9 @@ declare @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu @@ -184,7 +186,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -193,10 +195,10 @@ declare @llvm.riscv.vmadd.nxv16i8.nxv16i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma @@ -207,7 +209,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -217,9 +219,9 @@ declare @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu @@ -231,7 +233,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -240,10 +242,10 @@ declare @llvm.riscv.vmadd.nxv32i8.nxv32i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma @@ -254,7 +256,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -264,9 +266,9 @@ declare @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu @@ -278,7 +280,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -287,10 +289,10 @@ declare @llvm.riscv.vmadd.nxv1i16.nxv1i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma @@ -301,7 +303,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -311,9 +313,9 @@ declare @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -325,7 +327,7 @@ entry: %1, %2, 
%3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -334,10 +336,10 @@ declare @llvm.riscv.vmadd.nxv2i16.nxv2i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma @@ -348,7 +350,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -358,9 +360,9 @@ declare @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -372,7 +374,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -381,10 +383,10 @@ declare @llvm.riscv.vmadd.nxv4i16.nxv4i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma @@ -395,7 +397,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -405,9 +407,9 @@ declare @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -419,7 +421,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -428,10 +430,10 @@ declare @llvm.riscv.vmadd.nxv8i16.nxv8i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma @@ -442,7 +444,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -452,9 +454,9 @@ declare @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -466,7 +468,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -475,10 +477,10 @@ declare @llvm.riscv.vmadd.nxv16i16.nxv16i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma @@ -489,7 +491,7 @@ entry: %0, %1, %2, - i32 
%3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -499,9 +501,9 @@ declare @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -513,7 +515,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -522,10 +524,10 @@ declare @llvm.riscv.vmadd.nxv1i32.nxv1i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma @@ -536,7 +538,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -546,9 +548,9 @@ declare @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -560,7 +562,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -569,10 +571,10 @@ declare @llvm.riscv.vmadd.nxv2i32.nxv2i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma @@ -583,7 +585,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -593,9 +595,9 @@ declare @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -607,7 +609,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -616,10 +618,10 @@ declare @llvm.riscv.vmadd.nxv4i32.nxv4i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma @@ -630,7 +632,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -640,9 +642,9 @@ declare @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -654,7 +656,7 @@ entry: %1, 
%2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -663,10 +665,10 @@ declare @llvm.riscv.vmadd.nxv8i32.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma @@ -677,7 +679,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -687,9 +689,9 @@ declare @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -701,7 +703,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -710,10 +712,10 @@ declare @llvm.riscv.vmadd.nxv1i64.nxv1i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma @@ -724,7 +726,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -734,9 +736,9 @@ declare @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -748,7 +750,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -757,10 +759,10 @@ declare @llvm.riscv.vmadd.nxv2i64.nxv2i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma @@ -771,7 +773,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -781,9 +783,9 @@ declare @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -795,7 +797,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -804,10 +806,10 @@ declare @llvm.riscv.vmadd.nxv4i64.nxv4i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma @@ -818,7 +820,7 @@ entry: %0, %1, %2, - i32 %3, i32 
0) + iXLen %3, iXLen 0) ret %a } @@ -828,9 +830,9 @@ declare @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -842,7 +844,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -851,10 +853,10 @@ declare @llvm.riscv.vmadd.nxv1i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma @@ -865,7 +867,7 @@ entry: %0, i8 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -875,9 +877,9 @@ declare @llvm.riscv.vmadd.mask.nxv1i8.i8( i8, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu @@ -889,7 +891,7 @@ entry: i8 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -898,10 +900,10 @@ declare @llvm.riscv.vmadd.nxv2i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma @@ -912,7 +914,7 @@ entry: %0, i8 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -922,9 +924,9 @@ declare @llvm.riscv.vmadd.mask.nxv2i8.i8( i8, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu @@ -936,7 +938,7 @@ entry: i8 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -945,10 +947,10 @@ declare @llvm.riscv.vmadd.nxv4i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma @@ -959,7 +961,7 @@ entry: %0, i8 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -969,9 +971,9 @@ declare @llvm.riscv.vmadd.mask.nxv4i8.i8( i8, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu @@ -983,7 +985,7 @@ entry: i8 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -992,10 +994,10 @@ declare @llvm.riscv.vmadd.nxv8i8.i8( , i8, 
, - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma @@ -1006,7 +1008,7 @@ entry: %0, i8 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1016,9 +1018,9 @@ declare @llvm.riscv.vmadd.mask.nxv8i8.i8( i8, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu @@ -1030,7 +1032,7 @@ entry: i8 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1039,10 +1041,10 @@ declare @llvm.riscv.vmadd.nxv16i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma @@ -1053,7 +1055,7 @@ entry: %0, i8 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1063,9 +1065,9 @@ declare @llvm.riscv.vmadd.mask.nxv16i8.i8( i8, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu @@ -1077,7 +1079,7 @@ entry: i8 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1086,10 +1088,10 @@ declare @llvm.riscv.vmadd.nxv32i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma @@ -1100,7 +1102,7 @@ entry: %0, i8 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1110,9 +1112,9 @@ declare @llvm.riscv.vmadd.mask.nxv32i8.i8( i8, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu @@ -1124,7 +1126,7 @@ entry: i8 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1133,10 +1135,10 @@ declare @llvm.riscv.vmadd.nxv1i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma @@ -1147,7 +1149,7 @@ entry: %0, i16 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1157,9 +1159,9 @@ declare @llvm.riscv.vmadd.mask.nxv1i16.i16( i16, , , - i32, i32); + iXLen, iXLen); -define 
@intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu @@ -1171,7 +1173,7 @@ entry: i16 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1180,10 +1182,10 @@ declare @llvm.riscv.vmadd.nxv2i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma @@ -1194,7 +1196,7 @@ entry: %0, i16 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1204,9 +1206,9 @@ declare @llvm.riscv.vmadd.mask.nxv2i16.i16( i16, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu @@ -1218,7 +1220,7 @@ entry: i16 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1227,10 +1229,10 @@ declare @llvm.riscv.vmadd.nxv4i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma @@ -1241,7 +1243,7 @@ entry: %0, i16 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1251,9 +1253,9 @@ declare @llvm.riscv.vmadd.mask.nxv4i16.i16( i16, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu @@ -1265,7 +1267,7 @@ entry: i16 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1274,10 +1276,10 @@ declare @llvm.riscv.vmadd.nxv8i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma @@ -1288,7 +1290,7 @@ entry: %0, i16 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1298,9 +1300,9 @@ declare @llvm.riscv.vmadd.mask.nxv8i16.i16( i16, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu @@ -1312,7 +1314,7 @@ entry: i16 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1321,10 +1323,10 @@ declare @llvm.riscv.vmadd.nxv16i16.i16( , i16, , - i32, - i32); + 
iXLen, + iXLen); -define @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma @@ -1335,7 +1337,7 @@ entry: %0, i16 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1345,9 +1347,9 @@ declare @llvm.riscv.vmadd.mask.nxv16i16.i16( i16, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu @@ -1359,7 +1361,7 @@ entry: i16 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1368,10 +1370,10 @@ declare @llvm.riscv.vmadd.nxv1i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma @@ -1382,7 +1384,7 @@ entry: %0, i32 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1392,9 +1394,9 @@ declare @llvm.riscv.vmadd.mask.nxv1i32.i32( i32, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu @@ -1406,7 +1408,7 @@ entry: i32 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1415,10 +1417,10 @@ declare @llvm.riscv.vmadd.nxv2i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma @@ -1429,7 +1431,7 @@ entry: %0, i32 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1439,9 +1441,9 @@ declare @llvm.riscv.vmadd.mask.nxv2i32.i32( i32, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu @@ -1453,7 +1455,7 @@ entry: i32 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1462,10 +1464,10 @@ declare @llvm.riscv.vmadd.nxv4i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma @@ -1476,7 +1478,7 @@ entry: %0, i32 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1486,9 +1488,9 @@ declare @llvm.riscv.vmadd.mask.nxv4i32.i32( i32, , , - 
i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu @@ -1500,7 +1502,7 @@ entry: i32 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1509,10 +1511,10 @@ declare @llvm.riscv.vmadd.nxv8i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma @@ -1523,7 +1525,7 @@ entry: %0, i32 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1533,9 +1535,9 @@ declare @llvm.riscv.vmadd.mask.nxv8i32.i32( i32, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu @@ -1547,7 +1549,7 @@ entry: i32 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1556,28 +1558,34 @@ declare @llvm.riscv.vmadd.nxv1i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma -; CHECK-NEXT: vmadd.vv v8, v10, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma +; RV32-NEXT: vmadd.vv v8, v10, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma +; RV64-NEXT: vmadd.vx v8, a0, v9 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmadd.nxv1i64.i64( %0, i64 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1587,28 +1595,34 @@ declare @llvm.riscv.vmadd.mask.nxv1i64.i64( i64, , , - i32, i32); - -define @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu -; CHECK-NEXT: vmadd.vv v8, v10, v9, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, iXLen); + +define 
@intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vmadd.vv v8, v10, v9, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vmadd.vx v8, a0, v9, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmadd.mask.nxv1i64.i64( %0, i64 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1617,28 +1631,34 @@ declare @llvm.riscv.vmadd.nxv2i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, ma -; CHECK-NEXT: vmadd.vv v8, v12, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, ma +; RV32-NEXT: vmadd.vv v8, v12, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma +; RV64-NEXT: vmadd.vx v8, a0, v10 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmadd.nxv2i64.i64( %0, i64 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1648,28 +1668,34 @@ declare @llvm.riscv.vmadd.mask.nxv2i64.i64( i64, , , - i32, i32); - -define @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu -; CHECK-NEXT: vmadd.vv v8, v12, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, iXLen); + +define @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; RV32-NEXT: vmadd.vv v8, v12, v10, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, 
m2, tu, mu +; RV64-NEXT: vmadd.vx v8, a0, v10, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmadd.mask.nxv2i64.i64( %0, i64 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1678,28 +1704,34 @@ declare @llvm.riscv.vmadd.nxv4i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, ma -; CHECK-NEXT: vmadd.vv v8, v16, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, ma +; RV32-NEXT: vmadd.vv v8, v16, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma +; RV64-NEXT: vmadd.vx v8, a0, v12 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmadd.nxv4i64.i64( %0, i64 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1709,28 +1741,34 @@ declare @llvm.riscv.vmadd.mask.nxv4i64.i64( i64, , , - i32, i32); - -define @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu -; CHECK-NEXT: vmadd.vv v8, v16, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, iXLen); + +define @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; RV32-NEXT: vmadd.vv v8, v16, v12, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; RV64-NEXT: vmadd.vx v8, a0, v12, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmadd.mask.nxv4i64.i64( %0, i64 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll deleted file mode 100644 index abb8abf..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll +++ /dev/null @@ -1,2122 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmax.nxv1i8.nxv1i8( - , - , - , - 
i32); - -define @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv1i8.nxv1i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv1i8.nxv1i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv1i8.nxv1i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv2i8.nxv2i8( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv2i8.nxv2i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv2i8.nxv2i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv2i8.nxv2i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv4i8.nxv4i8( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv4i8.nxv4i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv4i8.nxv4i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv8i8.nxv8i8( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv8i8.nxv8i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv8i8.nxv8i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a 
-} - -declare @llvm.riscv.vmax.nxv16i8.nxv16i8( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv16i8.nxv16i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv16i8.nxv16i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv32i8.nxv32i8( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv32i8.nxv32i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv32i8.nxv32i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv64i8.nxv64i8( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv64i8.nxv64i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv64i8.nxv64i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv1i16.nxv1i16( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv1i16.nxv1i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv1i16.nxv1i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, 
ta, mu -; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv2i16.nxv2i16( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv2i16.nxv2i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv2i16.nxv2i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv4i16.nxv4i16( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv4i16.nxv4i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv4i16.nxv4i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv8i16.nxv8i16( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv8i16.nxv8i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv8i16.nxv8i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv16i16.nxv16i16( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv16i16.nxv16i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv16i16.nxv16i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, 
%2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv32i16.nxv32i16( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv32i16.nxv32i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv32i16.nxv32i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv1i32.nxv1i32( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv1i32.nxv1i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv1i32.nxv1i32( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv2i32.nxv2i32( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv2i32.nxv2i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv2i32.nxv2i32( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv4i32.nxv4i32( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vmax.nxv4i32.nxv4i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv4i32.nxv4i32( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv8i32.nxv8i32( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv8i32.nxv8i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv8i32.nxv8i32( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv16i32.nxv16i32( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv16i32.nxv16i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv16i32.nxv16i32( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv1i64.nxv1i64( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv1i64.nxv1i64( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv1i64.nxv1i64( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv2i64.nxv2i64( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { -; 
CHECK-LABEL: intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv2i64.nxv2i64( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv2i64.nxv2i64( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv4i64.nxv4i64( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv4i64.nxv4i64( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv4i64.nxv4i64( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv8i64.nxv8i64( - , - , - , - i32); - -define @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmax.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv8i64.nxv8i64( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv8i64.nxv8i64( - , - , - , - , - i32, - i32); - -define @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv1i8.i8( - , - , - i8, - i32); - -define @intrinsic_vmax_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv1i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv1i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - 
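The vmax-rv32.ll tests being deleted here are folded into a single combined file in the same way as the vmadd tests earlier in this patch. For reference, a minimal sketch of what the merged form of the first vmax test looks like is given below; it is only illustrative, not part of the patch: the RUN-line convention and the iXLen placeholder are those used throughout this commit, the nxv1i8 check lines are copied from the RV32 file deleted above, the merged file name (vmax.ll) is assumed by analogy with the other renames, and in the real file the assertions would be regenerated with utils/update_llc_test_checks.py rather than written by hand.

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

; The VL operand is written as iXLen; sed rewrites it to the target's native
; XLEN type before llc parses the file, so one source covers both triples.
declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)
  ret <vscale x 1 x i8> %a
}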
-declare @llvm.riscv.vmax.nxv2i8.i8( - , - , - i8, - i32); - -define @intrinsic_vmax_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv2i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv2i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv4i8.i8( - , - , - i8, - i32); - -define @intrinsic_vmax_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv4i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv4i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv8i8.i8( - , - , - i8, - i32); - -define @intrinsic_vmax_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv8i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv8i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv16i8.i8( - , - , - i8, - i32); - -define @intrinsic_vmax_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv16i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv16i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vmax.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} 
- -declare @llvm.riscv.vmax.nxv32i8.i8( - , - , - i8, - i32); - -define @intrinsic_vmax_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv32i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv32i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vmax.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv64i8.i8( - , - , - i8, - i32); - -define @intrinsic_vmax_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv64i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv64i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vmax.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv64i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv1i16.i16( - , - , - i16, - i32); - -define @intrinsic_vmax_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv1i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv1i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv2i16.i16( - , - , - i16, - i32); - -define @intrinsic_vmax_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv2i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv2i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vmax.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv4i16.i16( - , - , - i16, - i32); - -define @intrinsic_vmax_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv4i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv4i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv8i16.i16( - , - , - i16, - i32); - -define @intrinsic_vmax_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv8i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv8i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vmax.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv16i16.i16( - , - , - i16, - i32); - -define @intrinsic_vmax_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv16i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv16i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vmax.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv32i16.i16( - , - , - i16, - i32); - -define @intrinsic_vmax_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv32i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv32i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vmax.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv32i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv1i32.i32( - , - , - i32, - i32); - -define @intrinsic_vmax_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv1i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv1i32.i32( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv2i32.i32( - , - , - i32, - i32); - -define @intrinsic_vmax_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv2i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv2i32.i32( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv4i32.i32( - , - , - i32, - i32); - -define @intrinsic_vmax_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv4i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv4i32.i32( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vmax.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv8i32.i32( - , - , - i32, - i32); - -define @intrinsic_vmax_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv8i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv8i32.i32( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) 
nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vmax.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv16i32.i32( - , - , - i32, - i32); - -define @intrinsic_vmax_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv16i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv16i32.i32( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vmax.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv16i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv1i64.i64( - , - , - i64, - i32); - -define @intrinsic_vmax_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vmax.vv v8, v8, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv1i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv1i64.i64( - , - , - i64, - , - i32, - i32); - -define @intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmax.nxv2i64.i64( - , - , - i64, - i32); - -define @intrinsic_vmax_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vmax.vv v8, v8, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmax.nxv2i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmax.mask.nxv2i64.i64( - , - , - i64, - , - i32, - i32); - -define @intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; 
CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vmax_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vmax_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT:    vlse64.v v24, (a0), zero
-; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i64> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmax.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmax.ll
index 5982198..6fd448c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -28,10 +30,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,9 +54,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,9 +101,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -112,7 +114,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -122,10 +124,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,9 +148,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -159,7 +161,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -169,10 +171,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1>
%3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -184,7 +186,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -193,9 +195,9 @@ declare @llvm.riscv.vmax.nxv16i8.nxv16i8( , , , - i64); + iXLen); -define @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -206,7 +208,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -216,10 +218,10 @@ declare @llvm.riscv.vmax.mask.nxv16i8.nxv16i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -231,7 +233,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -240,9 +242,9 @@ declare @llvm.riscv.vmax.nxv32i8.nxv32i8( , , , - i64); + iXLen); -define @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -253,7 +255,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -263,10 +265,10 @@ declare @llvm.riscv.vmax.mask.nxv32i8.nxv32i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -278,7 +280,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -287,9 +289,9 @@ declare @llvm.riscv.vmax.nxv64i8.nxv64i8( , , , - i64); + iXLen); -define @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -300,7 +302,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vmax.mask.nxv64i8.nxv64i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) @@ -326,7 +328,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -335,9 +337,9 @@ declare @llvm.riscv.vmax.nxv1i16.nxv1i16( , , , - i64); + iXLen); -define @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -348,7 +350,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -358,10 +360,10 @@ declare @llvm.riscv.vmax.mask.nxv1i16.nxv1i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -373,7 +375,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -382,9 +384,9 @@ declare @llvm.riscv.vmax.nxv2i16.nxv2i16( , , , - i64); + iXLen); -define @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -395,7 +397,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -405,10 +407,10 @@ declare @llvm.riscv.vmax.mask.nxv2i16.nxv2i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -420,7 +422,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -429,9 +431,9 @@ declare @llvm.riscv.vmax.nxv4i16.nxv4i16( , , , - i64); + iXLen); -define @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -442,7 +444,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -452,10 +454,10 @@ declare @llvm.riscv.vmax.mask.nxv4i16.nxv4i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -467,7 +469,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -476,9 +478,9 @@ declare @llvm.riscv.vmax.nxv8i16.nxv8i16( , , , - i64); + iXLen); -define @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -489,7 +491,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -499,10 +501,10 @@ declare @llvm.riscv.vmax.mask.nxv8i16.nxv8i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -514,7 +516,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) 
ret %a } @@ -523,9 +525,9 @@ declare @llvm.riscv.vmax.nxv16i16.nxv16i16( , , , - i64); + iXLen); -define @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -536,7 +538,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -546,10 +548,10 @@ declare @llvm.riscv.vmax.mask.nxv16i16.nxv16i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -561,7 +563,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -570,9 +572,9 @@ declare @llvm.riscv.vmax.nxv32i16.nxv32i16( , , , - i64); + iXLen); -define @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -583,7 +585,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -593,10 +595,10 @@ declare @llvm.riscv.vmax.mask.nxv32i16.nxv32i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -609,7 +611,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -618,9 +620,9 @@ declare @llvm.riscv.vmax.nxv1i32.nxv1i32( , , , - i64); + iXLen); -define @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -631,7 +633,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -641,10 +643,10 @@ declare @llvm.riscv.vmax.mask.nxv1i32.nxv1i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -656,7 +658,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -665,9 +667,9 @@ declare @llvm.riscv.vmax.nxv2i32.nxv2i32( , , , - i64); + iXLen); -define @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -678,7 +680,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -688,10 +690,10 @@ declare @llvm.riscv.vmax.mask.nxv2i32.nxv2i32( , , , - i64, - i64); + iXLen, + iXLen); -define 
@intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -703,7 +705,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -712,9 +714,9 @@ declare @llvm.riscv.vmax.nxv4i32.nxv4i32( , , , - i64); + iXLen); -define @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -725,7 +727,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -735,10 +737,10 @@ declare @llvm.riscv.vmax.mask.nxv4i32.nxv4i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -750,7 +752,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -759,9 +761,9 @@ declare @llvm.riscv.vmax.nxv8i32.nxv8i32( , , , - i64); + iXLen); -define @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -772,7 +774,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -782,10 +784,10 @@ declare @llvm.riscv.vmax.mask.nxv8i32.nxv8i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -797,7 +799,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vmax.nxv16i32.nxv16i32( , , , - i64); + iXLen); -define @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -819,7 +821,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -829,10 +831,10 @@ declare @llvm.riscv.vmax.mask.nxv16i32.nxv16i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -845,7 +847,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -854,9 +856,9 @@ declare @llvm.riscv.vmax.nxv1i64.nxv1i64( , , , - i64); + iXLen); -define @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -867,7 +869,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -877,10 +879,10 @@ declare @llvm.riscv.vmax.mask.nxv1i64.nxv1i64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -892,7 +894,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -901,9 +903,9 @@ declare @llvm.riscv.vmax.nxv2i64.nxv2i64( , , , - i64); + iXLen); -define @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -914,7 +916,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -924,10 +926,10 @@ declare @llvm.riscv.vmax.mask.nxv2i64.nxv2i64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -939,7 +941,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vmax.nxv4i64.nxv4i64( , , , - i64); + iXLen); -define @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -961,7 +963,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -971,10 +973,10 @@ declare @llvm.riscv.vmax.mask.nxv4i64.nxv4i64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -986,7 +988,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -995,9 +997,9 @@ declare @llvm.riscv.vmax.nxv8i64.nxv8i64( , , , - i64); + iXLen); -define @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -1008,7 +1010,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -1018,10 +1020,10 @@ declare @llvm.riscv.vmax.mask.nxv8i64.nxv8i64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, 
(a0) @@ -1034,7 +1036,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1043,9 +1045,9 @@ declare @llvm.riscv.vmax.nxv1i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vmax_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmax_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -1056,7 +1058,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1066,10 +1068,10 @@ declare @llvm.riscv.vmax.mask.nxv1i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu @@ -1081,7 +1083,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1090,9 +1092,9 @@ declare @llvm.riscv.vmax.nxv2i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vmax_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmax_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -1103,7 +1105,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1113,10 +1115,10 @@ declare @llvm.riscv.vmax.mask.nxv2i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu @@ -1128,7 +1130,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1137,9 +1139,9 @@ declare @llvm.riscv.vmax.nxv4i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vmax_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmax_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1150,7 +1152,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1160,10 +1162,10 @@ declare @llvm.riscv.vmax.mask.nxv4i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu @@ -1175,7 +1177,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1184,9 +1186,9 @@ declare @llvm.riscv.vmax.nxv8i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vmax_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmax_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -1197,7 +1199,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1207,10 +1209,10 @@ declare @llvm.riscv.vmax.mask.nxv8i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 
%4) nounwind { +define @intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu @@ -1222,7 +1224,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1231,9 +1233,9 @@ declare @llvm.riscv.vmax.nxv16i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vmax_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmax_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1244,7 +1246,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1254,10 +1256,10 @@ declare @llvm.riscv.vmax.mask.nxv16i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu @@ -1269,7 +1271,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1278,9 +1280,9 @@ declare @llvm.riscv.vmax.nxv32i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vmax_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmax_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -1291,7 +1293,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1301,10 +1303,10 @@ declare @llvm.riscv.vmax.mask.nxv32i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu @@ -1316,7 +1318,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1325,9 +1327,9 @@ declare @llvm.riscv.vmax.nxv64i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vmax_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmax_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma @@ -1338,7 +1340,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1348,10 +1350,10 @@ declare @llvm.riscv.vmax.mask.nxv64i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu @@ -1363,7 +1365,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1372,9 +1374,9 @@ declare @llvm.riscv.vmax.nxv1i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vmax_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmax_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a1, e16, mf4, ta, ma @@ -1385,7 +1387,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1395,10 +1397,10 @@ declare @llvm.riscv.vmax.mask.nxv1i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu @@ -1410,7 +1412,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1419,9 +1421,9 @@ declare @llvm.riscv.vmax.nxv2i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vmax_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmax_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -1432,7 +1434,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1442,10 +1444,10 @@ declare @llvm.riscv.vmax.mask.nxv2i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu @@ -1457,7 +1459,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1466,9 +1468,9 @@ declare @llvm.riscv.vmax.nxv4i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vmax_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmax_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1479,7 +1481,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1489,10 +1491,10 @@ declare @llvm.riscv.vmax.mask.nxv4i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -1504,7 +1506,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1513,9 +1515,9 @@ declare @llvm.riscv.vmax.nxv8i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vmax_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmax_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1526,7 +1528,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1536,10 +1538,10 @@ declare @llvm.riscv.vmax.mask.nxv8i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -1551,7 +1553,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) 
ret %a } @@ -1560,9 +1562,9 @@ declare @llvm.riscv.vmax.nxv16i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vmax_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmax_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -1573,7 +1575,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1583,10 +1585,10 @@ declare @llvm.riscv.vmax.mask.nxv16i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu @@ -1598,7 +1600,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1607,9 +1609,9 @@ declare @llvm.riscv.vmax.nxv32i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vmax_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmax_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma @@ -1620,7 +1622,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1630,10 +1632,10 @@ declare @llvm.riscv.vmax.mask.nxv32i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu @@ -1645,7 +1647,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1654,9 +1656,9 @@ declare @llvm.riscv.vmax.nxv1i32.i32( , , i32, - i64); + iXLen); -define @intrinsic_vmax_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmax_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1667,7 +1669,7 @@ entry: undef, %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1677,10 +1679,10 @@ declare @llvm.riscv.vmax.mask.nxv1i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu @@ -1692,7 +1694,7 @@ entry: %1, i32 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1701,9 +1703,9 @@ declare @llvm.riscv.vmax.nxv2i32.i32( , , i32, - i64); + iXLen); -define @intrinsic_vmax_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmax_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1714,7 +1716,7 @@ entry: undef, %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1724,10 +1726,10 @@ declare @llvm.riscv.vmax.mask.nxv2i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define 
@intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu @@ -1739,7 +1741,7 @@ entry: %1, i32 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1748,9 +1750,9 @@ declare @llvm.riscv.vmax.nxv4i32.i32( , , i32, - i64); + iXLen); -define @intrinsic_vmax_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmax_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1761,7 +1763,7 @@ entry: undef, %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1771,10 +1773,10 @@ declare @llvm.riscv.vmax.mask.nxv4i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1786,7 +1788,7 @@ entry: %1, i32 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1795,9 +1797,9 @@ declare @llvm.riscv.vmax.nxv8i32.i32( , , i32, - i64); + iXLen); -define @intrinsic_vmax_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmax_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1808,7 +1810,7 @@ entry: undef, %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1818,10 +1820,10 @@ declare @llvm.riscv.vmax.mask.nxv8i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1833,7 +1835,7 @@ entry: %1, i32 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1842,9 +1844,9 @@ declare @llvm.riscv.vmax.nxv16i32.i32( , , i32, - i64); + iXLen); -define @intrinsic_vmax_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmax_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma @@ -1855,7 +1857,7 @@ entry: undef, %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1865,10 +1867,10 @@ declare @llvm.riscv.vmax.mask.nxv16i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu @@ -1880,7 +1882,7 @@ entry: %1, i32 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1889,20 +1891,32 @@ declare @llvm.riscv.vmax.nxv1i64.i64( , , i64, - i64); - -define @intrinsic_vmax_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vmax_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmax_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmax_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vmax.vv v8, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmax_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmax.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv1i64.i64( undef, %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -1912,22 +1926,34 @@ declare @llvm.riscv.vmax.mask.nxv1i64.i64( , i64, , - i64, - i64); - -define @intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vmax.vv v8, v9, v10, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vmax.vx v8, v9, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmax.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1936,20 +1962,32 @@ declare @llvm.riscv.vmax.nxv2i64.i64( , , i64, - i64); - -define @intrinsic_vmax_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmax_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmax_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vmax.vv v8, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmax_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmax.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv2i64.i64( undef, %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -1959,22 +1997,34 @@ declare @llvm.riscv.vmax.mask.nxv2i64.i64( , i64, , - i64, - i64); - -define @intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vmax.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret 
+ iXLen, + iXLen); + +define @intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmax.vv v8, v10, v12, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vmax.vx v8, v10, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmax.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1983,20 +2033,32 @@ declare @llvm.riscv.vmax.nxv4i64.i64( , , i64, - i64); - -define @intrinsic_vmax_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmax_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmax_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmax.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmax_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmax.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv4i64.i64( undef, %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -2006,22 +2068,34 @@ declare @llvm.riscv.vmax.mask.nxv4i64.i64( , i64, , - i64, - i64); - -define @intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vmax.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmax.vv v8, v12, v16, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vmax.vx v8, v12, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmax.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -2030,20 +2104,32 @@ declare @llvm.riscv.vmax.nxv8i64.i64( , , i64, - i64); - -define @intrinsic_vmax_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmax_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmax_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: 
intrinsic_vmax_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmax.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmax_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmax.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv8i64.i64( undef, %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -2053,22 +2139,34 @@ declare @llvm.riscv.vmax.mask.nxv8i64.i64( , i64, , - i64, - i64); - -define @intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vmax.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vmax.vv v8, v16, v24, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vmax.vx v8, v16, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmax.mask.nxv8i64.i64( %0, %1, i64 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll deleted file mode 100644 index 0567a21b..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll +++ /dev/null @@ -1,2122 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmaxu.nxv1i8.nxv1i8( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmaxu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv2i8.nxv2i8( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmaxu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8( - undef, - %0, - %1, - i32 %2) - - ret %a 
-} - -declare @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv4i8.nxv4i8( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmaxu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv8i8.nxv8i8( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmaxu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv16i8.nxv16i8( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmaxu.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv32i8.nxv32i8( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: 
vmaxu.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv64i8.nxv64i8( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vmaxu.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv1i16.nxv1i16( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmaxu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv2i16.nxv2i16( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmaxu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv4i16.nxv4i16( - , - , - , - i32); - -define 
@intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmaxu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv8i16.nxv8i16( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmaxu.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv16i16.nxv16i16( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmaxu.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv32i16.nxv32i16( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vmaxu.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, 
ta, mu -; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv1i32.nxv1i32( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmaxu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv2i32.nxv2i32( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmaxu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv4i32.nxv4i32( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmaxu.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv8i32.nxv8i32( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmaxu.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32( - , - , - , - , - i32, - i32); - -define 
@intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv16i32.nxv16i32( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vmaxu.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv1i64.nxv1i64( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmaxu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv2i64.nxv2i64( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmaxu.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv4i64.nxv4i64( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; 
CHECK-NEXT: vmaxu.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv8i64.nxv8i64( - , - , - , - i32); - -define @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmaxu.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64( - , - , - , - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv1i8.i8( - , - , - i8, - i32); - -define @intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv1i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv1i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv2i8.i8( - , - , - i8, - i32); - -define @intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv2i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv2i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv4i8.i8( - , - , - i8, - i32); - -define @intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { 
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv4i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv4i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv8i8.i8( - , - , - i8, - i32); - -define @intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv8i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv8i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv16i8.i8( - , - , - i8, - i32); - -define @intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv16i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv16i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv32i8.i8( - , - , - i8, - i32); - -define @intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv32i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv32i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv64i8.i8( - , - , - i8, - i32); - -define 
@intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv64i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv64i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv64i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv1i16.i16( - , - , - i16, - i32); - -define @intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv1i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv1i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv2i16.i16( - , - , - i16, - i32); - -define @intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv2i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv2i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv4i16.i16( - , - , - i16, - i32); - -define @intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv4i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv4i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv4i16.i16( - %0, 
- %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv8i16.i16( - , - , - i16, - i32); - -define @intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv8i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv8i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv16i16.i16( - , - , - i16, - i32); - -define @intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv16i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv16i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv32i16.i16( - , - , - i16, - i32); - -define @intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv32i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv32i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv32i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv1i32.i32( - , - , - i32, - i32); - -define @intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv1i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv1i32.i32( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv2i32.i32( - , - , - i32, - i32); - -define @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv2i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv2i32.i32( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv4i32.i32( - , - , - i32, - i32); - -define @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv4i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv4i32.i32( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv8i32.i32( - , - , - i32, - i32); - -define @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv8i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv8i32.i32( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv16i32.i32( - , - , - i32, - i32); - -define @intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv16i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv16i32.i32( - , - , - i32, - , - i32, - i32); - -define 
@intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv16i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv1i64.i64( - , - , - i64, - i32); - -define @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vmaxu.vv v8, v8, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv1i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv1i64.i64( - , - , - i64, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv2i64.i64( - , - , - i64, - i32); - -define @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vmaxu.vv v8, v8, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv2i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv2i64.i64( - , - , - i64, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv4i64.i64( - , - , - i64, - i32); - -define @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmaxu.vv v8, v8, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = 
call @llvm.riscv.vmaxu.nxv4i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv4i64.i64( - , - , - i64, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vmaxu.nxv8i64.i64( - , - , - i64, - i32); - -define @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vmaxu.vv v8, v8, v16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.nxv8i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmaxu.mask.nxv8i64.i64( - , - , - i64, - , - i32, - i32); - -define @intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmaxu.mask.nxv8i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4, i32 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu.ll similarity index 80% rename from llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll rename to llvm/test/CodeGen/RISCV/rvv/vmaxu.ll index 7ee7fa3..cea7f93 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vmaxu.nxv1i8.nxv1i8( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -18,7 +20,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -28,10 +30,10 @@ declare @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, 
%1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -43,7 +45,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -52,9 +54,9 @@ declare @llvm.riscv.vmaxu.nxv2i8.nxv2i8( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -65,7 +67,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -75,10 +77,10 @@ declare @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -90,7 +92,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -99,9 +101,9 @@ declare @llvm.riscv.vmaxu.nxv4i8.nxv4i8( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -112,7 +114,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -122,10 +124,10 @@ declare @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -137,7 +139,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -146,9 +148,9 @@ declare @llvm.riscv.vmaxu.nxv8i8.nxv8i8( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -159,7 +161,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -169,10 +171,10 @@ declare @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -184,7 +186,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -193,9 +195,9 @@ declare @llvm.riscv.vmaxu.nxv16i8.nxv16i8( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -206,7 +208,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen 
%2) ret %a } @@ -216,10 +218,10 @@ declare @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -231,7 +233,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -240,9 +242,9 @@ declare @llvm.riscv.vmaxu.nxv32i8.nxv32i8( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -253,7 +255,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -263,10 +265,10 @@ declare @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -278,7 +280,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -287,9 +289,9 @@ declare @llvm.riscv.vmaxu.nxv64i8.nxv64i8( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -300,7 +302,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) @@ -326,7 +328,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -335,9 +337,9 @@ declare @llvm.riscv.vmaxu.nxv1i16.nxv1i16( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -348,7 +350,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -358,10 +360,10 @@ declare @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -373,7 +375,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -382,9 +384,9 @@ declare @llvm.riscv.vmaxu.nxv2i16.nxv2i16( , , , - i64); + iXLen); -define 
@intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -395,7 +397,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -405,10 +407,10 @@ declare @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -420,7 +422,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -429,9 +431,9 @@ declare @llvm.riscv.vmaxu.nxv4i16.nxv4i16( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -442,7 +444,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -452,10 +454,10 @@ declare @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -467,7 +469,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -476,9 +478,9 @@ declare @llvm.riscv.vmaxu.nxv8i16.nxv8i16( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -489,7 +491,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -499,10 +501,10 @@ declare @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -514,7 +516,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -523,9 +525,9 @@ declare @llvm.riscv.vmaxu.nxv16i16.nxv16i16( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -536,7 +538,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -546,10 +548,10 @@ declare @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +define 
@intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -561,7 +563,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -570,9 +572,9 @@ declare @llvm.riscv.vmaxu.nxv32i16.nxv32i16( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -583,7 +585,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -593,10 +595,10 @@ declare @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -609,7 +611,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -618,9 +620,9 @@ declare @llvm.riscv.vmaxu.nxv1i32.nxv1i32( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -631,7 +633,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -641,10 +643,10 @@ declare @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -656,7 +658,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -665,9 +667,9 @@ declare @llvm.riscv.vmaxu.nxv2i32.nxv2i32( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -678,7 +680,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -688,10 +690,10 @@ declare @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -703,7 +705,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -712,9 +714,9 @@ declare @llvm.riscv.vmaxu.nxv4i32.nxv4i32( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -725,7 +727,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -735,10 +737,10 @@ declare @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -750,7 +752,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -759,9 +761,9 @@ declare @llvm.riscv.vmaxu.nxv8i32.nxv8i32( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -772,7 +774,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -782,10 +784,10 @@ declare @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -797,7 +799,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vmaxu.nxv16i32.nxv16i32( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -819,7 +821,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -829,10 +831,10 @@ declare @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -845,7 +847,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -854,9 +856,9 @@ declare @llvm.riscv.vmaxu.nxv1i64.nxv1i64( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -867,7 +869,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -877,10 +879,10 @@ declare @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a0, e64, m1, ta, mu @@ -892,7 +894,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -901,9 +903,9 @@ declare @llvm.riscv.vmaxu.nxv2i64.nxv2i64( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -914,7 +916,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -924,10 +926,10 @@ declare @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -939,7 +941,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vmaxu.nxv4i64.nxv4i64( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -961,7 +963,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -971,10 +973,10 @@ declare @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -986,7 +988,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -995,9 +997,9 @@ declare @llvm.riscv.vmaxu.nxv8i64.nxv8i64( , , , - i64); + iXLen); -define @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -1008,7 +1010,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -1018,10 +1020,10 @@ declare @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -1034,7 +1036,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1043,9 +1045,9 @@ declare @llvm.riscv.vmaxu.nxv1i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -1056,7 +1058,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1066,10 +1068,10 @@ declare @llvm.riscv.vmaxu.mask.nxv1i8.i8( , i8, , - 
i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu @@ -1081,7 +1083,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1090,9 +1092,9 @@ declare @llvm.riscv.vmaxu.nxv2i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -1103,7 +1105,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1113,10 +1115,10 @@ declare @llvm.riscv.vmaxu.mask.nxv2i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu @@ -1128,7 +1130,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1137,9 +1139,9 @@ declare @llvm.riscv.vmaxu.nxv4i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1150,7 +1152,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1160,10 +1162,10 @@ declare @llvm.riscv.vmaxu.mask.nxv4i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu @@ -1175,7 +1177,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1184,9 +1186,9 @@ declare @llvm.riscv.vmaxu.nxv8i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -1197,7 +1199,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1207,10 +1209,10 @@ declare @llvm.riscv.vmaxu.mask.nxv8i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu @@ -1222,7 +1224,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1231,9 +1233,9 @@ declare @llvm.riscv.vmaxu.nxv16i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1244,7 +1246,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1254,10 +1256,10 @@ declare @llvm.riscv.vmaxu.mask.nxv16i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu @@ -1269,7 +1271,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1278,9 +1280,9 @@ declare @llvm.riscv.vmaxu.nxv32i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -1291,7 +1293,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1301,10 +1303,10 @@ declare @llvm.riscv.vmaxu.mask.nxv32i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu @@ -1316,7 +1318,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1325,9 +1327,9 @@ declare @llvm.riscv.vmaxu.nxv64i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma @@ -1338,7 +1340,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1348,10 +1350,10 @@ declare @llvm.riscv.vmaxu.mask.nxv64i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu @@ -1363,7 +1365,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1372,9 +1374,9 @@ declare @llvm.riscv.vmaxu.nxv1i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -1385,7 +1387,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1395,10 +1397,10 @@ declare @llvm.riscv.vmaxu.mask.nxv1i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu @@ 
-1410,7 +1412,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1419,9 +1421,9 @@ declare @llvm.riscv.vmaxu.nxv2i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -1432,7 +1434,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1442,10 +1444,10 @@ declare @llvm.riscv.vmaxu.mask.nxv2i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu @@ -1457,7 +1459,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1466,9 +1468,9 @@ declare @llvm.riscv.vmaxu.nxv4i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1479,7 +1481,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1489,10 +1491,10 @@ declare @llvm.riscv.vmaxu.mask.nxv4i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -1504,7 +1506,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1513,9 +1515,9 @@ declare @llvm.riscv.vmaxu.nxv8i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1526,7 +1528,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1536,10 +1538,10 @@ declare @llvm.riscv.vmaxu.mask.nxv8i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -1551,7 +1553,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1560,9 +1562,9 @@ declare @llvm.riscv.vmaxu.nxv16i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -1573,7 +1575,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1583,10 +1585,10 @@ declare 
@llvm.riscv.vmaxu.mask.nxv16i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu @@ -1598,7 +1600,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1607,9 +1609,9 @@ declare @llvm.riscv.vmaxu.nxv32i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma @@ -1620,7 +1622,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1630,10 +1632,10 @@ declare @llvm.riscv.vmaxu.mask.nxv32i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu @@ -1645,7 +1647,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1654,9 +1656,9 @@ declare @llvm.riscv.vmaxu.nxv1i32.i32( , , i32, - i64); + iXLen); -define @intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1667,7 +1669,7 @@ entry: undef, %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1677,10 +1679,10 @@ declare @llvm.riscv.vmaxu.mask.nxv1i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu @@ -1692,7 +1694,7 @@ entry: %1, i32 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1701,9 +1703,9 @@ declare @llvm.riscv.vmaxu.nxv2i32.i32( , , i32, - i64); + iXLen); -define @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1714,7 +1716,7 @@ entry: undef, %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1724,10 +1726,10 @@ declare @llvm.riscv.vmaxu.mask.nxv2i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu @@ -1739,7 +1741,7 @@ entry: %1, i32 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1748,9 +1750,9 @@ declare @llvm.riscv.vmaxu.nxv4i32.i32( , , i32, - i64); + 
iXLen); -define @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1761,7 +1763,7 @@ entry: undef, %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1771,10 +1773,10 @@ declare @llvm.riscv.vmaxu.mask.nxv4i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1786,7 +1788,7 @@ entry: %1, i32 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1795,9 +1797,9 @@ declare @llvm.riscv.vmaxu.nxv8i32.i32( , , i32, - i64); + iXLen); -define @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1808,7 +1810,7 @@ entry: undef, %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1818,10 +1820,10 @@ declare @llvm.riscv.vmaxu.mask.nxv8i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1833,7 +1835,7 @@ entry: %1, i32 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1842,9 +1844,9 @@ declare @llvm.riscv.vmaxu.nxv16i32.i32( , , i32, - i64); + iXLen); -define @intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma @@ -1855,7 +1857,7 @@ entry: undef, %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1865,10 +1867,10 @@ declare @llvm.riscv.vmaxu.mask.nxv16i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu @@ -1880,7 +1882,7 @@ entry: %1, i32 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1889,20 +1891,32 @@ declare @llvm.riscv.vmaxu.nxv1i64.i64( , , i64, - i64); - -define @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli 
zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vmaxu.vv v8, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmaxu.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv1i64.i64( undef, %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -1912,22 +1926,34 @@ declare @llvm.riscv.vmaxu.mask.nxv1i64.i64( , i64, , - i64, - i64); - -define @intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vmaxu.vv v8, v9, v10, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vmaxu.vx v8, v9, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1936,20 +1962,32 @@ declare @llvm.riscv.vmaxu.nxv2i64.i64( , , i64, - i64); - -define @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vmaxu.vv v8, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmaxu.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv2i64.i64( undef, %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -1959,22 +1997,34 @@ declare @llvm.riscv.vmaxu.mask.nxv2i64.i64( , i64, , - i64, - i64); - -define @intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmaxu.vv v8, v10, v12, v0.t +; 
RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vmaxu.vx v8, v10, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1983,20 +2033,32 @@ declare @llvm.riscv.vmaxu.nxv4i64.i64( , , i64, - i64); - -define @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmaxu.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmaxu.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv4i64.i64( undef, %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -2006,22 +2068,34 @@ declare @llvm.riscv.vmaxu.mask.nxv4i64.i64( , i64, , - i64, - i64); - -define @intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmaxu.vv v8, v12, v16, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vmaxu.vx v8, v12, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -2030,20 +2104,32 @@ declare @llvm.riscv.vmaxu.nxv8i64.i64( , , i64, - i64); - -define @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmaxu.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; 
RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmaxu.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv8i64.i64( undef, %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -2053,22 +2139,34 @@ declare @llvm.riscv.vmaxu.mask.nxv8i64.i64( , i64, , - i64, - i64); - -define @intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vmaxu.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vmaxu.vv v8, v16, v24, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vmaxu.vx v8, v16, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.mask.nxv8i64.i64( %0, %1, i64 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll deleted file mode 100644 index 4fd2f65..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll +++ /dev/null @@ -1,2074 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmin.nxv1i8.nxv1i8( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv1i8.nxv1i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv2i8.nxv2i8( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv2i8.nxv2i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv2i8.nxv2i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8( - %0, - %1, - 
%2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv4i8.nxv4i8( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv4i8.nxv4i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv4i8.nxv4i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv8i8.nxv8i8( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv8i8.nxv8i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv8i8.nxv8i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv16i8.nxv16i8( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv16i8.nxv16i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv16i8.nxv16i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv32i8.nxv32i8( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv32i8.nxv32i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv32i8.nxv32i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmin.vv v8, v12, 
v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv64i8.nxv64i8( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv64i8.nxv64i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv64i8.nxv64i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv1i16.nxv1i16( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv1i16.nxv1i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv1i16.nxv1i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv2i16.nxv2i16( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv2i16.nxv2i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv2i16.nxv2i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv4i16.nxv4i16( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv4i16.nxv4i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv4i16.nxv4i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) 
nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv8i16.nxv8i16( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv8i16.nxv8i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv8i16.nxv8i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv16i16.nxv16i16( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv16i16.nxv16i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv16i16.nxv16i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv32i16.nxv32i16( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv32i16.nxv32i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv32i16.nxv32i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv1i32.nxv1i32( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vmin.nxv1i32.nxv1i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv1i32.nxv1i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv2i32.nxv2i32( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv2i32.nxv2i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv2i32.nxv2i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv4i32.nxv4i32( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv4i32.nxv4i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv4i32.nxv4i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv8i32.nxv8i32( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv8i32.nxv8i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv8i32.nxv8i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv16i32.nxv16i32( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv16i32.nxv16i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv16i32.nxv16i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv1i64.nxv1i64( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv1i64.nxv1i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv1i64.nxv1i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv2i64.nxv2i64( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv2i64.nxv2i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv2i64.nxv2i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv4i64.nxv4i64( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv4i64.nxv4i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv4i64.nxv4i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - 
%3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv8i64.nxv8i64( - , - , - , - i64); - -define @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmin.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv8i64.nxv8i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv8i64.nxv8i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv1i8.i8( - , - , - i8, - i64); - -define @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv1i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv1i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv2i8.i8( - , - , - i8, - i64); - -define @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv2i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv2i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv4i8.i8( - , - , - i8, - i64); - -define @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv4i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv4i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: 
- %a = call @llvm.riscv.vmin.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv8i8.i8( - , - , - i8, - i64); - -define @intrinsic_vmin_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv8i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv8i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv16i8.i8( - , - , - i8, - i64); - -define @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv16i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv16i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vmin.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv32i8.i8( - , - , - i8, - i64); - -define @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv32i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv32i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vmin.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv64i8.i8( - , - , - i8, - i64); - -define @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv64i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv64i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vmin.vx v8, v16, a0, v0.t -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv64i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv1i16.i16( - , - , - i16, - i64); - -define @intrinsic_vmin_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv1i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv1i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv2i16.i16( - , - , - i16, - i64); - -define @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv2i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv2i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv4i16.i16( - , - , - i16, - i64); - -define @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv4i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv4i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv8i16.i16( - , - , - i16, - i64); - -define @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv8i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv8i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vmin.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv16i16.i16( - , - , - i16, - i64); - -define @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv16i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv16i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vmin.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv32i16.i16( - , - , - i16, - i64); - -define @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv32i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv32i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vmin.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv32i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv1i32.i32( - , - , - i32, - i64); - -define @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv1i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv1i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv2i32.i32( - , - , - i32, - i64); - -define @intrinsic_vmin_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv2i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv2i32.i32( - , - , - i32, - , - i64, - i64); - -define 
@intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv4i32.i32( - , - , - i32, - i64); - -define @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv4i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv4i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vmin.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv8i32.i32( - , - , - i32, - i64); - -define @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv8i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv8i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vmin.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv16i32.i32( - , - , - i32, - i64); - -define @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv16i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv16i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vmin.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv16i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv1i64.i64( - , - , - i64, - i64); - -define @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv1i64.i64( - undef, - %0, - i64 %1, - 
i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv1i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv2i64.i64( - , - , - i64, - i64); - -define @intrinsic_vmin_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv2i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv2i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vmin.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv4i64.i64( - , - , - i64, - i64); - -define @intrinsic_vmin_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv4i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv4i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vmin.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vmin.nxv8i64.i64( - , - , - i64, - i64); - -define @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.nxv8i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmin.mask.nxv8i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vmin.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmin.mask.nxv8i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmin.ll similarity index 80% rename from llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmin.ll index 26cc6d9..c70107b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vmin.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vmin.nxv1i8.nxv1i8( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -18,7 +20,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -28,10 +30,10 @@ declare @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -43,7 +45,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -52,9 +54,9 @@ declare @llvm.riscv.vmin.nxv2i8.nxv2i8( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -65,7 +67,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -75,10 +77,10 @@ declare @llvm.riscv.vmin.mask.nxv2i8.nxv2i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -90,7 +92,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -99,9 +101,9 @@ declare @llvm.riscv.vmin.nxv4i8.nxv4i8( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -112,7 +114,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -122,10 +124,10 @@ declare @llvm.riscv.vmin.mask.nxv4i8.nxv4i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -137,7 +139,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -146,9 +148,9 @@ declare @llvm.riscv.vmin.nxv8i8.nxv8i8( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -159,7 +161,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -169,10 +171,10 @@ declare @llvm.riscv.vmin.mask.nxv8i8.nxv8i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -184,7 +186,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -193,9 +195,9 @@ declare @llvm.riscv.vmin.nxv16i8.nxv16i8( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -206,7 +208,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -216,10 +218,10 @@ declare @llvm.riscv.vmin.mask.nxv16i8.nxv16i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -231,7 +233,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -240,9 +242,9 @@ declare @llvm.riscv.vmin.nxv32i8.nxv32i8( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -253,7 +255,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -263,10 +265,10 @@ declare @llvm.riscv.vmin.mask.nxv32i8.nxv32i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -278,7 +280,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -287,9 +289,9 @@ declare @llvm.riscv.vmin.nxv64i8.nxv64i8( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -300,7 +302,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vmin.mask.nxv64i8.nxv64i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) @@ -326,7 +328,7 @@ entry: %1, %2, %3, - 
i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -335,9 +337,9 @@ declare @llvm.riscv.vmin.nxv1i16.nxv1i16( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -348,7 +350,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -358,10 +360,10 @@ declare @llvm.riscv.vmin.mask.nxv1i16.nxv1i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -373,7 +375,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -382,9 +384,9 @@ declare @llvm.riscv.vmin.nxv2i16.nxv2i16( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -395,7 +397,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -405,10 +407,10 @@ declare @llvm.riscv.vmin.mask.nxv2i16.nxv2i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -420,7 +422,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -429,9 +431,9 @@ declare @llvm.riscv.vmin.nxv4i16.nxv4i16( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -442,7 +444,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -452,10 +454,10 @@ declare @llvm.riscv.vmin.mask.nxv4i16.nxv4i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -467,7 +469,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -476,9 +478,9 @@ declare @llvm.riscv.vmin.nxv8i16.nxv8i16( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -489,7 +491,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -499,10 +501,10 @@ declare @llvm.riscv.vmin.mask.nxv8i16.nxv8i16( , , , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -514,7 +516,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -523,9 +525,9 @@ declare @llvm.riscv.vmin.nxv16i16.nxv16i16( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -536,7 +538,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -546,10 +548,10 @@ declare @llvm.riscv.vmin.mask.nxv16i16.nxv16i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -561,7 +563,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -570,9 +572,9 @@ declare @llvm.riscv.vmin.nxv32i16.nxv32i16( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -583,7 +585,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -593,10 +595,10 @@ declare @llvm.riscv.vmin.mask.nxv32i16.nxv32i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -609,7 +611,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -618,9 +620,9 @@ declare @llvm.riscv.vmin.nxv1i32.nxv1i32( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -631,7 +633,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -641,10 +643,10 @@ declare @llvm.riscv.vmin.mask.nxv1i32.nxv1i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -656,7 +658,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -665,9 +667,9 @@ declare @llvm.riscv.vmin.nxv2i32.nxv2i32( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +define 
@intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -678,7 +680,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -688,10 +690,10 @@ declare @llvm.riscv.vmin.mask.nxv2i32.nxv2i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -703,7 +705,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -712,9 +714,9 @@ declare @llvm.riscv.vmin.nxv4i32.nxv4i32( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -725,7 +727,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -735,10 +737,10 @@ declare @llvm.riscv.vmin.mask.nxv4i32.nxv4i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -750,7 +752,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -759,9 +761,9 @@ declare @llvm.riscv.vmin.nxv8i32.nxv8i32( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -772,7 +774,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -782,10 +784,10 @@ declare @llvm.riscv.vmin.mask.nxv8i32.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -797,7 +799,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vmin.nxv16i32.nxv16i32( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -819,7 +821,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -829,10 +831,10 @@ declare @llvm.riscv.vmin.mask.nxv16i32.nxv16i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -845,7 +847,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -854,9 +856,9 @@ declare @llvm.riscv.vmin.nxv1i64.nxv1i64( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -867,7 +869,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -877,10 +879,10 @@ declare @llvm.riscv.vmin.mask.nxv1i64.nxv1i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -892,7 +894,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -901,9 +903,9 @@ declare @llvm.riscv.vmin.nxv2i64.nxv2i64( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -914,7 +916,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -924,10 +926,10 @@ declare @llvm.riscv.vmin.mask.nxv2i64.nxv2i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -939,7 +941,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vmin.nxv4i64.nxv4i64( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -961,7 +963,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -971,10 +973,10 @@ declare @llvm.riscv.vmin.mask.nxv4i64.nxv4i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -986,7 +988,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -995,9 +997,9 @@ declare @llvm.riscv.vmin.nxv8i64.nxv8i64( , , , - i32); + iXLen); -define @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -1008,7 +1010,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen 
%2) ret %a } @@ -1018,10 +1020,10 @@ declare @llvm.riscv.vmin.mask.nxv8i64.nxv8i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -1034,7 +1036,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1043,9 +1045,9 @@ declare @llvm.riscv.vmin.nxv1i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -1056,7 +1058,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1066,10 +1068,10 @@ declare @llvm.riscv.vmin.mask.nxv1i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu @@ -1081,7 +1083,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1090,9 +1092,9 @@ declare @llvm.riscv.vmin.nxv2i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -1103,7 +1105,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1113,10 +1115,10 @@ declare @llvm.riscv.vmin.mask.nxv2i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu @@ -1128,7 +1130,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1137,9 +1139,9 @@ declare @llvm.riscv.vmin.nxv4i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1150,7 +1152,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1160,10 +1162,10 @@ declare @llvm.riscv.vmin.mask.nxv4i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu @@ -1175,7 +1177,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1184,9 +1186,9 @@ declare @llvm.riscv.vmin.nxv8i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vmin_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +define 
@intrinsic_vmin_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -1197,7 +1199,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1207,10 +1209,10 @@ declare @llvm.riscv.vmin.mask.nxv8i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu @@ -1222,7 +1224,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1231,9 +1233,9 @@ declare @llvm.riscv.vmin.nxv16i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1244,7 +1246,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1254,10 +1256,10 @@ declare @llvm.riscv.vmin.mask.nxv16i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu @@ -1269,7 +1271,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1278,9 +1280,9 @@ declare @llvm.riscv.vmin.nxv32i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -1291,7 +1293,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1301,10 +1303,10 @@ declare @llvm.riscv.vmin.mask.nxv32i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu @@ -1316,7 +1318,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1325,9 +1327,9 @@ declare @llvm.riscv.vmin.nxv64i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma @@ -1338,7 +1340,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1348,10 +1350,10 @@ declare @llvm.riscv.vmin.mask.nxv64i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, 
m8, ta, mu @@ -1363,7 +1365,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1372,9 +1374,9 @@ declare @llvm.riscv.vmin.nxv1i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vmin_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmin_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -1385,7 +1387,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1395,10 +1397,10 @@ declare @llvm.riscv.vmin.mask.nxv1i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu @@ -1410,7 +1412,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1419,9 +1421,9 @@ declare @llvm.riscv.vmin.nxv2i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -1432,7 +1434,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1442,10 +1444,10 @@ declare @llvm.riscv.vmin.mask.nxv2i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu @@ -1457,7 +1459,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1466,9 +1468,9 @@ declare @llvm.riscv.vmin.nxv4i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1479,7 +1481,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1489,10 +1491,10 @@ declare @llvm.riscv.vmin.mask.nxv4i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -1504,7 +1506,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1513,9 +1515,9 @@ declare @llvm.riscv.vmin.nxv8i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1526,7 +1528,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1536,10 +1538,10 @@ declare 
@llvm.riscv.vmin.mask.nxv8i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -1551,7 +1553,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1560,9 +1562,9 @@ declare @llvm.riscv.vmin.nxv16i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -1573,7 +1575,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1583,10 +1585,10 @@ declare @llvm.riscv.vmin.mask.nxv16i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu @@ -1598,7 +1600,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1607,9 +1609,9 @@ declare @llvm.riscv.vmin.nxv32i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma @@ -1620,7 +1622,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1630,10 +1632,10 @@ declare @llvm.riscv.vmin.mask.nxv32i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu @@ -1645,7 +1647,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1654,9 +1656,9 @@ declare @llvm.riscv.vmin.nxv1i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1667,7 +1669,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1677,10 +1679,10 @@ declare @llvm.riscv.vmin.mask.nxv1i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu @@ -1692,7 +1694,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1701,9 +1703,9 @@ declare @llvm.riscv.vmin.nxv2i32.i32( , , i32, - i32); + iXLen); -define 
@intrinsic_vmin_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmin_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1714,7 +1716,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1724,10 +1726,10 @@ declare @llvm.riscv.vmin.mask.nxv2i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu @@ -1739,7 +1741,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1748,9 +1750,9 @@ declare @llvm.riscv.vmin.nxv4i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1761,7 +1763,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1771,10 +1773,10 @@ declare @llvm.riscv.vmin.mask.nxv4i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1786,7 +1788,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1795,9 +1797,9 @@ declare @llvm.riscv.vmin.nxv8i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1808,7 +1810,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1818,10 +1820,10 @@ declare @llvm.riscv.vmin.mask.nxv8i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1833,7 +1835,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1842,9 +1844,9 @@ declare @llvm.riscv.vmin.nxv16i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma @@ -1855,7 +1857,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1865,10 +1867,10 @@ declare @llvm.riscv.vmin.mask.nxv16i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define 
@intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu @@ -1880,7 +1882,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1889,26 +1891,32 @@ declare @llvm.riscv.vmin.nxv1i64.i64( , , i64, - i32); - -define @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vmin.vv v8, v8, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmin_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vmin.vv v8, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmin_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmin.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1918,28 +1926,34 @@ declare @llvm.riscv.vmin.mask.nxv1i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vmin.vv v8, v9, v10, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vmin.vx v8, v9, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmin.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1948,26 +1962,32 @@ declare @llvm.riscv.vmin.nxv2i64.i64( , , i64, - i32); - -define @intrinsic_vmin_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vmin.vv v8, v8, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define 
@intrinsic_vmin_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmin_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vmin.vv v8, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmin_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmin.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1977,28 +1997,34 @@ declare @llvm.riscv.vmin.mask.nxv2i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmin.vv v8, v10, v12, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vmin.vx v8, v10, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmin.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -2007,26 +2033,32 @@ declare @llvm.riscv.vmin.nxv4i64.i64( , , i64, - i32); - -define @intrinsic_vmin_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmin.vv v8, v8, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmin_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmin_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmin.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmin_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmin.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -2036,28 +2068,34 @@ declare @llvm.riscv.vmin.mask.nxv4i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind 
{ -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmin.vv v8, v12, v16, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vmin.vx v8, v12, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmin.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -2066,26 +2104,32 @@ declare @llvm.riscv.vmin.nxv8i64.i64( , , i64, - i32); - -define @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmin_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vmin.vv v8, v8, v16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmin_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmin.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmin_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmin.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -2095,28 +2139,34 @@ declare @llvm.riscv.vmin.mask.nxv8i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vmin.vv v8, v16, v24, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: 
intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vmin.vx v8, v16, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmin.mask.nxv8i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll deleted file mode 100644 index 6bccfb2..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll +++ /dev/null @@ -1,2074 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vminu.nxv1i8.nxv1i8( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv1i8.nxv1i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv1i8.nxv1i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv2i8.nxv2i8( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv2i8.nxv2i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv2i8.nxv2i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv4i8.nxv4i8( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv4i8.nxv4i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv4i8.nxv4i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv8i8.nxv8i8( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8( %0, 
%1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv8i8.nxv8i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv8i8.nxv8i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv16i8.nxv16i8( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv16i8.nxv16i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv16i8.nxv16i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv32i8.nxv32i8( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv32i8.nxv32i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv32i8.nxv32i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv64i8.nxv64i8( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv64i8.nxv64i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv64i8.nxv64i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vminu.mask.nxv64i8.nxv64i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv1i16.nxv1i16( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv1i16.nxv1i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv1i16.nxv1i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv2i16.nxv2i16( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv2i16.nxv2i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv2i16.nxv2i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv4i16.nxv4i16( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv4i16.nxv4i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv4i16.nxv4i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv8i16.nxv8i16( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv8i16.nxv8i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv8i16.nxv8i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv16i16.nxv16i16( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv16i16.nxv16i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv16i16.nxv16i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv32i16.nxv32i16( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv32i16.nxv32i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv32i16.nxv32i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv1i32.nxv1i32( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv1i32.nxv1i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv1i32.nxv1i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv2i32.nxv2i32( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vminu.nxv2i32.nxv2i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv2i32.nxv2i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv4i32.nxv4i32( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv4i32.nxv4i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv4i32.nxv4i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv8i32.nxv8i32( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv8i32.nxv8i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv8i32.nxv8i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv16i32.nxv16i32( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv16i32.nxv16i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv16i32.nxv16i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv1i64.nxv1i64( - , - , - , - i64); - -define 
@intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv1i64.nxv1i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv1i64.nxv1i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv2i64.nxv2i64( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv2i64.nxv2i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv2i64.nxv2i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv4i64.nxv4i64( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv4i64.nxv4i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv4i64.nxv4i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv8i64.nxv8i64( - , - , - , - i64); - -define @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vminu.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv8i64.nxv8i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv8i64.nxv8i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vminu.vv v8, 
v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv8i64.nxv8i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv1i8.i8( - , - , - i8, - i64); - -define @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv1i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv1i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv2i8.i8( - , - , - i8, - i64); - -define @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv2i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv2i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv4i8.i8( - , - , - i8, - i64); - -define @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv4i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv4i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv8i8.i8( - , - , - i8, - i64); - -define @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv8i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv8i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, 
m1, ta, mu -; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv16i8.i8( - , - , - i8, - i64); - -define @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv16i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv16i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vminu.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv32i8.i8( - , - , - i8, - i64); - -define @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv32i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv32i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vminu.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv64i8.i8( - , - , - i8, - i64); - -define @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv64i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv64i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vminu.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv64i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv1i16.i16( - , - , - i16, - i64); - -define @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv1i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv1i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv2i16.i16( - , - , - i16, - i64); - -define @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv2i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv2i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv4i16.i16( - , - , - i16, - i64); - -define @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv4i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv4i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv8i16.i16( - , - , - i16, - i64); - -define @intrinsic_vminu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv8i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv8i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vminu.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv16i16.i16( - , - , - i16, - i64); - -define @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv16i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare 
@llvm.riscv.vminu.mask.nxv16i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vminu.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv32i16.i16( - , - , - i16, - i64); - -define @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv32i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv32i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vminu.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv32i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv1i32.i32( - , - , - i32, - i64); - -define @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv1i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv1i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv2i32.i32( - , - , - i32, - i64); - -define @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv2i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv2i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv4i32.i32( - , - , - i32, - i64); - -define @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; 
CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv4i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv4i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vminu.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv8i32.i32( - , - , - i32, - i64); - -define @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv8i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv8i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vminu.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv16i32.i32( - , - , - i32, - i64); - -define @intrinsic_vminu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv16i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv16i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vminu.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv16i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv1i64.i64( - , - , - i64, - i64); - -define @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv1i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv1i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv2i64.i64( - , - , - i64, - i64); - -define @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, 
i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv2i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv2i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vminu.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv4i64.i64( - , - , - i64, - i64); - -define @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv4i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv4i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vminu.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vminu.nxv8i64.i64( - , - , - i64, - i64); - -define @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.nxv8i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vminu.mask.nxv8i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vminu.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vminu.mask.nxv8i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vminu.ll similarity index 80% rename from llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vminu.ll index a71e260..f4c6755 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vminu.nxv1i8.nxv1i8( , , , - i32); + iXLen); -define 
@intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -18,7 +20,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -28,10 +30,10 @@ declare @llvm.riscv.vminu.mask.nxv1i8.nxv1i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -43,7 +45,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -52,9 +54,9 @@ declare @llvm.riscv.vminu.nxv2i8.nxv2i8( , , , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -65,7 +67,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -75,10 +77,10 @@ declare @llvm.riscv.vminu.mask.nxv2i8.nxv2i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -90,7 +92,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -99,9 +101,9 @@ declare @llvm.riscv.vminu.nxv4i8.nxv4i8( , , , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -112,7 +114,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -122,10 +124,10 @@ declare @llvm.riscv.vminu.mask.nxv4i8.nxv4i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -137,7 +139,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -146,9 +148,9 @@ declare @llvm.riscv.vminu.nxv8i8.nxv8i8( , , , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -159,7 +161,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -169,10 +171,10 @@ declare @llvm.riscv.vminu.mask.nxv8i8.nxv8i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -184,7 +186,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -193,9 +195,9 @@ declare @llvm.riscv.vminu.nxv16i8.nxv16i8( , , , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -206,7 +208,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -216,10 +218,10 @@ declare @llvm.riscv.vminu.mask.nxv16i8.nxv16i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -231,7 +233,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -240,9 +242,9 @@ declare @llvm.riscv.vminu.nxv32i8.nxv32i8( , , , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -253,7 +255,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -263,10 +265,10 @@ declare @llvm.riscv.vminu.mask.nxv32i8.nxv32i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -278,7 +280,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -287,9 +289,9 @@ declare @llvm.riscv.vminu.nxv64i8.nxv64i8( , , , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -300,7 +302,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vminu.mask.nxv64i8.nxv64i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) @@ -326,7 +328,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -335,9 +337,9 @@ declare @llvm.riscv.vminu.nxv1i16.nxv1i16( , , , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -348,7 +350,7 @@ entry: undef, %0, %1, - i32 
%2) + iXLen %2) ret %a } @@ -358,10 +360,10 @@ declare @llvm.riscv.vminu.mask.nxv1i16.nxv1i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -373,7 +375,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -382,9 +384,9 @@ declare @llvm.riscv.vminu.nxv2i16.nxv2i16( , , , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -395,7 +397,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -405,10 +407,10 @@ declare @llvm.riscv.vminu.mask.nxv2i16.nxv2i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -420,7 +422,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -429,9 +431,9 @@ declare @llvm.riscv.vminu.nxv4i16.nxv4i16( , , , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -442,7 +444,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -452,10 +454,10 @@ declare @llvm.riscv.vminu.mask.nxv4i16.nxv4i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -467,7 +469,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -476,9 +478,9 @@ declare @llvm.riscv.vminu.nxv8i16.nxv8i16( , , , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -489,7 +491,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -499,10 +501,10 @@ declare @llvm.riscv.vminu.mask.nxv8i16.nxv8i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -514,7 +516,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -523,9 +525,9 @@ declare @llvm.riscv.vminu.nxv16i16.nxv16i16( , 
, , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -536,7 +538,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -546,10 +548,10 @@ declare @llvm.riscv.vminu.mask.nxv16i16.nxv16i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -561,7 +563,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -570,9 +572,9 @@ declare @llvm.riscv.vminu.nxv32i16.nxv32i16( , , , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -583,7 +585,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -593,10 +595,10 @@ declare @llvm.riscv.vminu.mask.nxv32i16.nxv32i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -609,7 +611,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -618,9 +620,9 @@ declare @llvm.riscv.vminu.nxv1i32.nxv1i32( , , , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -631,7 +633,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -641,10 +643,10 @@ declare @llvm.riscv.vminu.mask.nxv1i32.nxv1i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -656,7 +658,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -665,9 +667,9 @@ declare @llvm.riscv.vminu.nxv2i32.nxv2i32( , , , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -678,7 +680,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -688,10 +690,10 @@ declare @llvm.riscv.vminu.mask.nxv2i32.nxv2i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) 
nounwind { +define @intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -703,7 +705,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -712,9 +714,9 @@ declare @llvm.riscv.vminu.nxv4i32.nxv4i32( , , , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -725,7 +727,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -735,10 +737,10 @@ declare @llvm.riscv.vminu.mask.nxv4i32.nxv4i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -750,7 +752,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -759,9 +761,9 @@ declare @llvm.riscv.vminu.nxv8i32.nxv8i32( , , , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -772,7 +774,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -782,10 +784,10 @@ declare @llvm.riscv.vminu.mask.nxv8i32.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -797,7 +799,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vminu.nxv16i32.nxv16i32( , , , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -819,7 +821,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -829,10 +831,10 @@ declare @llvm.riscv.vminu.mask.nxv16i32.nxv16i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -845,7 +847,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -854,9 +856,9 @@ declare @llvm.riscv.vminu.nxv1i64.nxv1i64( , , , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -867,7 +869,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -877,10 +879,10 @@ declare @llvm.riscv.vminu.mask.nxv1i64.nxv1i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -892,7 +894,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -901,9 +903,9 @@ declare @llvm.riscv.vminu.nxv2i64.nxv2i64( , , , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -914,7 +916,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -924,10 +926,10 @@ declare @llvm.riscv.vminu.mask.nxv2i64.nxv2i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -939,7 +941,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vminu.nxv4i64.nxv4i64( , , , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -961,7 +963,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -971,10 +973,10 @@ declare @llvm.riscv.vminu.mask.nxv4i64.nxv4i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -986,7 +988,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -995,9 +997,9 @@ declare @llvm.riscv.vminu.nxv8i64.nxv8i64( , , , - i32); + iXLen); -define @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -1008,7 +1010,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -1018,10 +1020,10 @@ declare @llvm.riscv.vminu.mask.nxv8i64.nxv8i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, 
(a0) @@ -1034,7 +1036,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1043,9 +1045,9 @@ declare @llvm.riscv.vminu.nxv1i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -1056,7 +1058,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1066,10 +1068,10 @@ declare @llvm.riscv.vminu.mask.nxv1i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu @@ -1081,7 +1083,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1090,9 +1092,9 @@ declare @llvm.riscv.vminu.nxv2i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -1103,7 +1105,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1113,10 +1115,10 @@ declare @llvm.riscv.vminu.mask.nxv2i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu @@ -1128,7 +1130,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1137,9 +1139,9 @@ declare @llvm.riscv.vminu.nxv4i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1150,7 +1152,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1160,10 +1162,10 @@ declare @llvm.riscv.vminu.mask.nxv4i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu @@ -1175,7 +1177,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1184,9 +1186,9 @@ declare @llvm.riscv.vminu.nxv8i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -1197,7 +1199,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1207,10 +1209,10 @@ declare @llvm.riscv.vminu.mask.nxv8i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu @@ -1222,7 +1224,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1231,9 +1233,9 @@ declare @llvm.riscv.vminu.nxv16i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1244,7 +1246,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1254,10 +1256,10 @@ declare @llvm.riscv.vminu.mask.nxv16i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu @@ -1269,7 +1271,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1278,9 +1280,9 @@ declare @llvm.riscv.vminu.nxv32i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -1291,7 +1293,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1301,10 +1303,10 @@ declare @llvm.riscv.vminu.mask.nxv32i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu @@ -1316,7 +1318,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1325,9 +1327,9 @@ declare @llvm.riscv.vminu.nxv64i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma @@ -1338,7 +1340,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1348,10 +1350,10 @@ declare @llvm.riscv.vminu.mask.nxv64i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu @@ -1363,7 +1365,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1372,9 +1374,9 @@ declare @llvm.riscv.vminu.nxv1i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vminu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -1385,7 +1387,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1395,10 +1397,10 @@ declare @llvm.riscv.vminu.mask.nxv1i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu @@ -1410,7 +1412,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1419,9 +1421,9 @@ declare @llvm.riscv.vminu.nxv2i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -1432,7 +1434,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1442,10 +1444,10 @@ declare @llvm.riscv.vminu.mask.nxv2i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu @@ -1457,7 +1459,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1466,9 +1468,9 @@ declare @llvm.riscv.vminu.nxv4i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1479,7 +1481,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1489,10 +1491,10 @@ declare @llvm.riscv.vminu.mask.nxv4i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -1504,7 +1506,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1513,9 +1515,9 @@ declare @llvm.riscv.vminu.nxv8i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vminu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vminu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1526,7 +1528,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1536,10 +1538,10 @@ declare @llvm.riscv.vminu.mask.nxv8i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -1551,7 +1553,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1560,9 +1562,9 @@ declare @llvm.riscv.vminu.nxv16i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -1573,7 +1575,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1583,10 +1585,10 @@ declare @llvm.riscv.vminu.mask.nxv16i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu @@ -1598,7 +1600,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1607,9 +1609,9 @@ declare @llvm.riscv.vminu.nxv32i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma @@ -1620,7 +1622,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1630,10 +1632,10 @@ declare @llvm.riscv.vminu.mask.nxv32i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu @@ -1645,7 +1647,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1654,9 +1656,9 @@ declare @llvm.riscv.vminu.nxv1i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1667,7 +1669,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1677,10 +1679,10 @@ declare @llvm.riscv.vminu.mask.nxv1i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu @@ -1692,7 +1694,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1701,9 +1703,9 @@ declare @llvm.riscv.vminu.nxv2i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1714,7 +1716,7 @@ entry: undef, %0, i32 %1, - 
i32 %2) + iXLen %2) ret %a } @@ -1724,10 +1726,10 @@ declare @llvm.riscv.vminu.mask.nxv2i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu @@ -1739,7 +1741,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1748,9 +1750,9 @@ declare @llvm.riscv.vminu.nxv4i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1761,7 +1763,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1771,10 +1773,10 @@ declare @llvm.riscv.vminu.mask.nxv4i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1786,7 +1788,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1795,9 +1797,9 @@ declare @llvm.riscv.vminu.nxv8i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1808,7 +1810,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1818,10 +1820,10 @@ declare @llvm.riscv.vminu.mask.nxv8i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1833,7 +1835,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1842,9 +1844,9 @@ declare @llvm.riscv.vminu.nxv16i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vminu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vminu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma @@ -1855,7 +1857,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1865,10 +1867,10 @@ declare @llvm.riscv.vminu.mask.nxv16i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu @@ -1880,7 +1882,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1889,26 +1891,32 @@ 
declare @llvm.riscv.vminu.nxv1i64.i64( , , i64, - i32); - -define @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vminu.vv v8, v8, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vminu_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vminu.vv v8, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vminu_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vminu.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1918,28 +1926,34 @@ declare @llvm.riscv.vminu.mask.nxv1i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vminu.vv v8, v9, v10, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vminu.vx v8, v9, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vminu.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1948,26 +1962,32 @@ declare @llvm.riscv.vminu.nxv2i64.i64( , , i64, - i32); - -define @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vminu.vv v8, v8, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vminu_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), 
zero +; RV32-NEXT: vminu.vv v8, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vminu_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vminu.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv2i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1977,28 +1997,34 @@ declare @llvm.riscv.vminu.mask.nxv2i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vminu.vv v8, v10, v12, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vminu.vx v8, v10, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vminu.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -2007,26 +2033,32 @@ declare @llvm.riscv.vminu.nxv4i64.i64( , , i64, - i32); - -define @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vminu.vv v8, v8, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vminu_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vminu.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vminu_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vminu.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -2036,28 +2068,34 @@ declare @llvm.riscv.vminu.mask.nxv4i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vminu.vv 
v8, v12, v16, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vminu.vv v8, v12, v16, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vminu.vx v8, v12, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vminu.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -2066,26 +2104,32 @@ declare @llvm.riscv.vminu.nxv8i64.i64( , , i64, - i32); - -define @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vminu_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vminu.vv v8, v8, v16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vminu_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vminu.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vminu_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vminu.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -2095,28 +2139,34 @@ declare @llvm.riscv.vminu.mask.nxv8i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vminu.vv v8, v16, v24, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vminu.vx v8, v16, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vminu.mask.nxv8i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } 
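Note on the merge mechanism illustrated by the vminu hunks above: the retained file replaces the per-target vector-length argument type (i32 on rv32, i64 on rv64) with the iXLen placeholder, so one test body can drive both targets. The placeholder is resolved by the RUN lines at test time; the exact header of the merged file is not reproduced here, but a representative sketch, assuming the sed-based substitution these merged rvv tests typically use, looks like:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64

Cases whose codegen is identical on both targets keep the shared CHECK prefix, while the i64 scalar-operand cases, which legalize through the stack on rv32 but use vminu.vx directly on rv64, carry the separate RV32/RV64 check lines shown in the hunks above.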
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll deleted file mode 100644 index 2b40057..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll +++ /dev/null @@ -1,882 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmsbc.nxv1i8.nxv1i8( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv1i8.nxv1i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv2i8.nxv2i8( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv2i8.nxv2i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv4i8.nxv4i8( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv4i8.nxv4i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv8i8.nxv8i8( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv8i8.nxv8i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv16i8.nxv16i8( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv16i8.nxv16i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv32i8.nxv32i8( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv32i8.nxv32i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv64i8.nxv64i8( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv64i8.nxv64i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv1i16.nxv1i16( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv1i16.nxv1i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv2i16.nxv2i16( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv2i16.nxv2i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv4i16.nxv4i16( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv4i16.nxv4i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv8i16.nxv8i16( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv8i16.nxv8i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv16i16.nxv16i16( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv16i16.nxv16i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv32i16.nxv32i16( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv32i16.nxv32i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv1i32.nxv1i32( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv1i32.nxv1i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv2i32.nxv2i32( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv2i32.nxv2i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv4i32.nxv4i32( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = 
call @llvm.riscv.vmsbc.nxv4i32.nxv4i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv8i32.nxv8i32( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv8i32.nxv8i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv16i32.nxv16i32( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv16i32.nxv16i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv1i64.nxv1i64( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv1i64.nxv1i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv2i64.nxv2i64( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv2i64.nxv2i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv4i64.nxv4i64( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv4i64.nxv4i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv8i64.nxv8i64( - , - , - i64); - -define @intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmsbc.vv v0, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv8i64.nxv8i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv1i8.i8( - , - i8, - i64); - -define @intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv1i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv2i8.i8( - , - i8, - i64); - -define @intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv2i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv4i8.i8( - , - i8, - i64); - -define 
@intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv4i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv8i8.i8( - , - i8, - i64); - -define @intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv8i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv16i8.i8( - , - i8, - i64); - -define @intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv16i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv32i8.i8( - , - i8, - i64); - -define @intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv32i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv64i8.i8( - , - i8, - i64); - -define @intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv64i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv1i16.i16( - , - i16, - i64); - -define @intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv1i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv2i16.i16( - , - i16, - i64); - -define @intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv2i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv4i16.i16( - , - i16, - i64); - -define @intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv4i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv8i16.i16( - , - i16, - i64); - -define @intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 
-; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv8i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv16i16.i16( - , - i16, - i64); - -define @intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv16i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv32i16.i16( - , - i16, - i64); - -define @intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv32i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv1i32.i32( - , - i32, - i64); - -define @intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv1i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv2i32.i32( - , - i32, - i64); - -define @intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv2i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv4i32.i32( - , - i32, - i64); - -define @intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv4i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv8i32.i32( - , - i32, - i64); - -define @intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv8i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv16i32.i32( - , - i32, - i64); - -define @intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv16i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv1i64.i64( - , - i64, - i64); - -define @intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv1i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv2i64.i64( - , - i64, - i64); - -define 
@intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv2i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv4i64.i64( - , - i64, - i64); - -define @intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv4i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsbc.nxv8i64.i64( - , - i64, - i64); - -define @intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vmsbc.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.nxv8i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll deleted file mode 100644 index fa3a6c6..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll +++ /dev/null @@ -1,1014 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a 
= call @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmsbc.vvm v12, v8, v10, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmsbc.vvm v16, v8, v12, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vmsbc.vvm v24, v8, v16, v0 -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmsbc.vvm v12, v8, 
v10, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmsbc.vvm v16, v8, v12, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vmsbc.vvm v24, v8, v16, v0 -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmsbc.vvm v12, v8, v10, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmsbc.vvm v16, v8, v12, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vmsbc.vvm v24, v8, v16, v0 -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmsbc.vvm v12, v8, v10, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmsbc.vvm v16, v8, v12, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64( - , - , - , - i64); - -define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmsbc.vvm v24, v8, v16, v0 -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64( - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8( - , - i8, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8( - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8( - , - i8, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8( - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8( - , - i8, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8( - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8( - , - i8, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv.v.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8( - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8( - , - i8, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vmsbc.vxm v10, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8( - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8( - , - i8, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vmsbc.vxm v12, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8( - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8( - , - i8, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vmsbc.vxm v16, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8( - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16( - , - i16, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16( - %0, - i16 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16( - , - i16, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16( - %0, - i16 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16( - , - i16, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv.v.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16( - %0, - i16 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16( - , - i16, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vmsbc.vxm v10, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16( - %0, - i16 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16( - , - i16, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vmsbc.vxm v12, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16( - %0, - i16 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16( - , - i16, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vmsbc.vxm v16, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16( - %0, - i16 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32( - , - i32, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32( - %0, - i32 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32( - , - i32, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv.v.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32( - %0, - i32 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32( - , - i32, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vmsbc.vxm v10, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32( - %0, - i32 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32( - , - i32, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32( %0, i32 %1, 
%2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vmsbc.vxm v12, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32( - %0, - i32 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32( - , - i32, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vmsbc.vxm v16, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32( - %0, - i32 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64( - , - i64, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv.v.v v0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64( - %0, - i64 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64( - , - i64, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vmsbc.vxm v10, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64( - %0, - i64 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64( - , - i64, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vmsbc.vxm v12, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64( - %0, - i64 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64( - , - i64, - , - i64); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vmsbc.vxm v16, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64( - %0, - i64 %1, - %2, - i64 %3) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in.ll similarity index 83% rename from llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in.ll index d0de4e5..bac8c91 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc 
-mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -19,7 +21,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -28,9 +30,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -42,7 +44,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -51,9 +53,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -65,7 +67,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -74,9 +76,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -88,7 +90,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -97,9 +99,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -111,7 +113,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -120,9 +122,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -134,7 +136,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -143,9 +145,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -157,7 +159,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -166,9 +168,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -180,7 +182,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -189,9 +191,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -203,7 +205,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -212,9 +214,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -226,7 +228,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -235,9 +237,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -249,7 +251,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -258,9 +260,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -272,7 +274,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -281,9 +283,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -295,7 +297,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -304,9 +306,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -318,7 +320,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -327,9 +329,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -341,7 +343,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -350,9 +352,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -364,7 +366,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -373,9 +375,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -387,7 +389,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -396,9 +398,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -410,7 +412,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -419,9 +421,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -433,7 +435,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -456,7 +458,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -465,9 +467,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, 
m4, ta, ma @@ -479,7 +481,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64( , , , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -502,7 +504,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -511,9 +513,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -525,7 +527,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -548,7 +550,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -557,9 +559,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -571,7 +573,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -580,9 +582,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -594,7 +596,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -603,9 +605,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -617,7 +619,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -626,9 +628,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -640,7 +642,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -649,9 +651,9 @@ declare 
@llvm.riscv.vmsbc.borrow.in.nxv64i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma @@ -663,7 +665,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -672,9 +674,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -686,7 +688,7 @@ entry: %0, i16 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -695,9 +697,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -709,7 +711,7 @@ entry: %0, i16 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -718,9 +720,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -732,7 +734,7 @@ entry: %0, i16 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -741,9 +743,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -755,7 +757,7 @@ entry: %0, i16 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -764,9 +766,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -778,7 +780,7 @@ entry: %0, i16 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -787,9 +789,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma @@ -801,7 +803,7 @@ entry: %0, i16 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -810,9 +812,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32( , 
i32, , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -824,7 +826,7 @@ entry: %0, i32 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -833,9 +835,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -847,7 +849,7 @@ entry: %0, i32 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -856,9 +858,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -870,7 +872,7 @@ entry: %0, i32 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -879,9 +881,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -893,7 +895,7 @@ entry: %0, i32 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -902,9 +904,9 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma @@ -916,7 +918,7 @@ entry: %0, i32 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -925,27 +927,34 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64( , i64, , - i32); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vmsbc.vvm v9, v8, v10, v0 -; CHECK-NEXT: vmv.v.v v0, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vmsbc.vvm v9, v8, v10, v0 +; RV32-NEXT: vmv.v.v v0, v9 +; RV32-NEXT: addi sp, sp, 16 +; 
RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsbc.vxm v9, v8, a0, v0 +; RV64-NEXT: vmv.v.v v0, v9 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64( %0, i64 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -954,27 +963,34 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64( , i64, , - i32); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmsbc.vvm v10, v8, v12, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmsbc.vvm v10, v8, v12, v0 +; RV32-NEXT: vmv1r.v v0, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmsbc.vxm v10, v8, a0, v0 +; RV64-NEXT: vmv1r.v v0, v10 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64( %0, i64 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -983,27 +999,34 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64( , i64, , - i32); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vmsbc.vvm v12, v8, v16, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmsbc.vvm v12, v8, v16, v0 +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsbc.vxm v12, v8, a0, v0 +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64( %0, i64 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1012,27 +1035,34 @@ declare @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64( , i64, , - i32); - -define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vmsbc.vvm v16, v8, v24, v0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vmsbc.vvm v16, v8, v24, v0 +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmsbc.vxm v16, v8, a0, v0 +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64( %0, i64 %1, %2, - i32 %3) + iXLen %3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.ll similarity index 77% rename from llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmsbc.ll index 0eac795..36238ce 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vmsbc.nxv1i8.nxv1i8( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv1i8.nxv1i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -24,9 +26,9 @@ entry: declare @llvm.riscv.vmsbc.nxv2i8.nxv2i8( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -36,7 +38,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv2i8.nxv2i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -44,9 +46,9 @@ entry: declare @llvm.riscv.vmsbc.nxv4i8.nxv4i8( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -56,7 +58,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv4i8.nxv4i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -64,9 +66,9 @@ entry: declare @llvm.riscv.vmsbc.nxv8i8.nxv8i8( , , - i32); + iXLen); -define 
@intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -76,7 +78,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv8i8.nxv8i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -84,9 +86,9 @@ entry: declare @llvm.riscv.vmsbc.nxv16i8.nxv16i8( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -96,7 +98,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv16i8.nxv16i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -104,9 +106,9 @@ entry: declare @llvm.riscv.vmsbc.nxv32i8.nxv32i8( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -116,7 +118,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv32i8.nxv32i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -124,9 +126,9 @@ entry: declare @llvm.riscv.vmsbc.nxv64i8.nxv64i8( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -136,7 +138,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv64i8.nxv64i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -144,9 +146,9 @@ entry: declare @llvm.riscv.vmsbc.nxv1i16.nxv1i16( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -156,7 +158,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv1i16.nxv1i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -164,9 +166,9 @@ entry: declare @llvm.riscv.vmsbc.nxv2i16.nxv2i16( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -176,7 +178,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv2i16.nxv2i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -184,9 +186,9 @@ entry: declare @llvm.riscv.vmsbc.nxv4i16.nxv4i16( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -196,7 +198,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv4i16.nxv4i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -204,9 +206,9 @@ entry: declare @llvm.riscv.vmsbc.nxv8i16.nxv8i16( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +define 
@intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -216,7 +218,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv8i16.nxv8i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -224,9 +226,9 @@ entry: declare @llvm.riscv.vmsbc.nxv16i16.nxv16i16( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -236,7 +238,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv16i16.nxv16i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -244,9 +246,9 @@ entry: declare @llvm.riscv.vmsbc.nxv32i16.nxv32i16( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -256,7 +258,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv32i16.nxv32i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -264,9 +266,9 @@ entry: declare @llvm.riscv.vmsbc.nxv1i32.nxv1i32( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -276,7 +278,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv1i32.nxv1i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -284,9 +286,9 @@ entry: declare @llvm.riscv.vmsbc.nxv2i32.nxv2i32( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -296,7 +298,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv2i32.nxv2i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -304,9 +306,9 @@ entry: declare @llvm.riscv.vmsbc.nxv4i32.nxv4i32( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -316,7 +318,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv4i32.nxv4i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -324,9 +326,9 @@ entry: declare @llvm.riscv.vmsbc.nxv8i32.nxv8i32( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -336,7 +338,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv8i32.nxv8i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -344,9 +346,9 @@ entry: declare @llvm.riscv.vmsbc.nxv16i32.nxv16i32( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32( %0, 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -356,7 +358,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv16i32.nxv16i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -364,9 +366,9 @@ entry: declare @llvm.riscv.vmsbc.nxv1i64.nxv1i64( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -376,7 +378,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv1i64.nxv1i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -384,9 +386,9 @@ entry: declare @llvm.riscv.vmsbc.nxv2i64.nxv2i64( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -396,7 +398,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv2i64.nxv2i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -404,9 +406,9 @@ entry: declare @llvm.riscv.vmsbc.nxv4i64.nxv4i64( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -416,7 +418,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv4i64.nxv4i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -424,9 +426,9 @@ entry: declare @llvm.riscv.vmsbc.nxv8i64.nxv8i64( , , - i32); + iXLen); -define @intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -436,7 +438,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv8i64.nxv8i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -444,9 +446,9 @@ entry: declare @llvm.riscv.vmsbc.nxv1i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -456,7 +458,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv1i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -464,9 +466,9 @@ entry: declare @llvm.riscv.vmsbc.nxv2i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -476,7 +478,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv2i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -484,9 +486,9 @@ entry: declare @llvm.riscv.vmsbc.nxv4i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a1, e8, mf2, ta, ma @@ -496,7 +498,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv4i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -504,9 +506,9 @@ entry: declare @llvm.riscv.vmsbc.nxv8i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -516,7 +518,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv8i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -524,9 +526,9 @@ entry: declare @llvm.riscv.vmsbc.nxv16i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -536,7 +538,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv16i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -544,9 +546,9 @@ entry: declare @llvm.riscv.vmsbc.nxv32i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -556,7 +558,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv32i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -564,9 +566,9 @@ entry: declare @llvm.riscv.vmsbc.nxv64i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma @@ -576,7 +578,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv64i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -584,9 +586,9 @@ entry: declare @llvm.riscv.vmsbc.nxv1i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -596,7 +598,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv1i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -604,9 +606,9 @@ entry: declare @llvm.riscv.vmsbc.nxv2i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -616,7 +618,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv2i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -624,9 +626,9 @@ entry: declare @llvm.riscv.vmsbc.nxv4i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -636,7 +638,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv4i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -644,9 +646,9 
@@ entry: declare @llvm.riscv.vmsbc.nxv8i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -656,7 +658,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv8i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -664,9 +666,9 @@ entry: declare @llvm.riscv.vmsbc.nxv16i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -676,7 +678,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv16i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -684,9 +686,9 @@ entry: declare @llvm.riscv.vmsbc.nxv32i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma @@ -696,7 +698,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv32i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -704,9 +706,9 @@ entry: declare @llvm.riscv.vmsbc.nxv1i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -716,7 +718,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv1i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -724,9 +726,9 @@ entry: declare @llvm.riscv.vmsbc.nxv2i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -736,7 +738,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv2i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -744,9 +746,9 @@ entry: declare @llvm.riscv.vmsbc.nxv4i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -756,7 +758,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv4i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -764,9 +766,9 @@ entry: declare @llvm.riscv.vmsbc.nxv8i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -776,7 +778,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv8i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -784,9 +786,9 @@ entry: declare @llvm.riscv.vmsbc.nxv16i32.i32( , i32, - i32); + iXLen); -define 
@intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma @@ -796,7 +798,7 @@ entry: %a = call @llvm.riscv.vmsbc.nxv16i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -804,25 +806,31 @@ entry: declare @llvm.riscv.vmsbc.nxv1i64.i64( , i64, - i32); - -define @intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vmsbc.vv v0, v8, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vmsbc.vv v0, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsbc.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.nxv1i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -830,25 +838,31 @@ entry: declare @llvm.riscv.vmsbc.nxv2i64.i64( , i64, - i32); - -define @intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vmsbc.vv v0, v8, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vmsbc.vv v0, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmsbc.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.nxv2i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -856,25 +870,31 @@ entry: declare @llvm.riscv.vmsbc.nxv4i64.i64( , i64, - i32); - -define @intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmsbc.vv v0, v8, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64( %0, i64 
%1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmsbc.vv v0, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsbc.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.nxv4i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -882,25 +902,31 @@ entry: declare @llvm.riscv.vmsbc.nxv8i64.i64( , i64, - i32); - -define @intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vmsbc.vv v0, v8, v16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmsbc.vv v0, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmsbc.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.nxv8i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll deleted file mode 100644 index 0c0d444..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll +++ /dev/null @@ -1,2414 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmseq.nxv1i8( - , - , - i64); - -define @intrinsic_vmseq_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv1i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv1i8( - , - , - , - , - i64); - -define @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmseq.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmseq.nxv1i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmseq.mask.nxv1i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv2i8( - , - , - i64); - -define @intrinsic_vmseq_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i8_nxv2i8: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv2i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv2i8( - , - , - , - , - i64); - -define @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmseq.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmseq.nxv2i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmseq.mask.nxv2i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv4i8( - , - , - i64); - -define @intrinsic_vmseq_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv4i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv4i8( - , - , - , - , - i64); - -define @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmseq.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmseq.nxv4i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmseq.mask.nxv4i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv8i8( - , - , - i64); - -define @intrinsic_vmseq_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv8i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv8i8( - , - , - , - , - i64); - -define @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmseq.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmseq.nxv8i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmseq.mask.nxv8i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv16i8( - , - , - i64); - -define @intrinsic_vmseq_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmseq.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv16i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv16i8( - , - , - , - , - i64); - -define @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e8, m2, ta, mu -; CHECK-NEXT: vmseq.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmseq.nxv16i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmseq.mask.nxv16i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv32i8( - , - , - i64); - -define @intrinsic_vmseq_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vv_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmseq.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv32i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv32i8( - , - , - , - , - i64); - -define @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmseq.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmseq.nxv32i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmseq.mask.nxv32i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv1i16( - , - , - i64); - -define @intrinsic_vmseq_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv1i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv1i16( - , - , - , - , - i64); - -define @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmseq.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmseq.nxv1i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmseq.mask.nxv1i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv2i16( - , - , - i64); - -define @intrinsic_vmseq_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv2i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv2i16( - , - , - , - , - i64); - -define @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmseq.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmseq.nxv2i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmseq.mask.nxv2i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv4i16( - , - , - i64); - -define 
@intrinsic_vmseq_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv4i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv4i16( - , - , - , - , - i64); - -define @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmseq.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmseq.nxv4i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmseq.mask.nxv4i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv8i16( - , - , - i64); - -define @intrinsic_vmseq_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmseq.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv8i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv8i16( - , - , - , - , - i64); - -define @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmseq.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmseq.nxv8i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmseq.mask.nxv8i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv16i16( - , - , - i64); - -define @intrinsic_vmseq_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmseq.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv16i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv16i16( - , - , - , - , - i64); - -define @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmseq.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmseq.nxv16i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmseq.mask.nxv16i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv1i32( - , - , - i64); - -define @intrinsic_vmseq_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv1i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv1i32( - , - , - , - , - i64); - -define 
@intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmseq.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmseq.nxv1i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmseq.mask.nxv1i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv2i32( - , - , - i64); - -define @intrinsic_vmseq_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv2i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv2i32( - , - , - , - , - i64); - -define @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmseq.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmseq.nxv2i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmseq.mask.nxv2i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv4i32( - , - , - i64); - -define @intrinsic_vmseq_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmseq.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv4i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv4i32( - , - , - , - , - i64); - -define @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmseq.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmseq.nxv4i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmseq.mask.nxv4i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv8i32( - , - , - i64); - -define @intrinsic_vmseq_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmseq.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv8i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv8i32( - , - , - , - , - i64); - -define @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmseq.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call 
@llvm.riscv.vmseq.nxv8i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmseq.mask.nxv8i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv1i64( - , - , - i64); - -define @intrinsic_vmseq_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv1i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv1i64( - , - , - , - , - i64); - -define @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmseq.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmseq.nxv1i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmseq.mask.nxv1i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv2i64( - , - , - i64); - -define @intrinsic_vmseq_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmseq.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv2i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv2i64( - , - , - , - , - i64); - -define @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmseq.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmseq.nxv2i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmseq.mask.nxv2i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv4i64( - , - , - i64); - -define @intrinsic_vmseq_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmseq.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv4i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv4i64( - , - , - , - , - i64); - -define @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmseq.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmseq.nxv4i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmseq.mask.nxv4i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv1i8.i8( - , - i8, - i64); - -define @intrinsic_vmseq_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vmseq.vx v0, v8, a0 -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv1i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv1i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmseq_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv2i8.i8( - , - i8, - i64); - -define @intrinsic_vmseq_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vmseq.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv2i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv2i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmseq_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv4i8.i8( - , - i8, - i64); - -define @intrinsic_vmseq_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vmseq.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv4i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv4i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmseq_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv8i8.i8( - , - i8, - i64); - -define @intrinsic_vmseq_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vmseq.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv8i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv8i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmseq_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv16i8.i8( - , - i8, - i64); - -define @intrinsic_vmseq_vx_nxv16i8_i8( %0, 
i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vmseq.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv16i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv16i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmseq_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmseq.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv32i8.i8( - , - i8, - i64); - -define @intrinsic_vmseq_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vmseq.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv32i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv32i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmseq_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmseq.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv1i16.i16( - , - i16, - i64); - -define @intrinsic_vmseq_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vmseq.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv1i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv1i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmseq_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv2i16.i16( - , - i16, - i64); - -define @intrinsic_vmseq_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vmseq.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv2i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv2i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmseq_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t -; CHECK-NEXT: 
vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv4i16.i16( - , - i16, - i64); - -define @intrinsic_vmseq_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vmseq.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv4i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv4i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmseq_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv8i16.i16( - , - i16, - i64); - -define @intrinsic_vmseq_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vmseq.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv8i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv8i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmseq_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmseq.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv16i16.i16( - , - i16, - i64); - -define @intrinsic_vmseq_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vmseq.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv16i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv16i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmseq_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmseq.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv1i32.i32( - , - i32, - i64); - -define @intrinsic_vmseq_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vmseq.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv1i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv1i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmseq_mask_vx_nxv1i32_i32( 
%0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv2i32.i32( - , - i32, - i64); - -define @intrinsic_vmseq_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vmseq.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv2i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv2i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmseq_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv4i32.i32( - , - i32, - i64); - -define @intrinsic_vmseq_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vmseq.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv4i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv4i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmseq_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmseq.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv8i32.i32( - , - i32, - i64); - -define @intrinsic_vmseq_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vmseq.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv8i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv8i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmseq_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmseq.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv1i64.i64( - , - i64, - i64); - -define @intrinsic_vmseq_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, 
m1, ta, ma -; CHECK-NEXT: vmseq.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv1i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv1i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmseq_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv2i64.i64( - , - i64, - i64); - -define @intrinsic_vmseq_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vmseq.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv2i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv2i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmseq_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmseq.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmseq.nxv4i64.i64( - , - i64, - i64); - -define @intrinsic_vmseq_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vmseq.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv4i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmseq.mask.nxv4i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmseq_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmseq.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -define @intrinsic_vmseq_vi_nxv1i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmseq.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv1i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmseq_mask_vi_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv1i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmseq_vi_nxv2i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmseq.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv2i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmseq_mask_vi_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv2i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmseq_vi_nxv4i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmseq.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv4i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmseq_mask_vi_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv4i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmseq_vi_nxv8i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmseq.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv8i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmseq_mask_vi_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv8i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmseq_vi_nxv16i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vi_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmseq.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv16i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmseq_mask_vi_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmseq.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv16i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmseq_vi_nxv32i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vi_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmseq.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv32i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmseq_mask_vi_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli 
zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmseq.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv32i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmseq_vi_nxv1i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmseq.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv1i16.i16( - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmseq_mask_vi_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv1i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmseq_vi_nxv2i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmseq.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv2i16.i16( - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmseq_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv2i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmseq_vi_nxv4i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmseq.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv4i16.i16( - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmseq_mask_vi_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv4i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmseq_vi_nxv8i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmseq.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv8i16.i16( - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmseq_mask_vi_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmseq.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv8i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmseq_vi_nxv16i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vmseq_vi_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmseq.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv16i16.i16( - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmseq_mask_vi_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmseq.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv16i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmseq_vi_nxv1i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmseq.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv1i32.i32( - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmseq_mask_vi_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv1i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmseq_vi_nxv2i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmseq.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv2i32.i32( - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmseq_mask_vi_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv2i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmseq_vi_nxv4i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmseq.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv4i32.i32( - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmseq_mask_vi_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmseq.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv4i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmseq_vi_nxv8i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmseq.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv8i32.i32( - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmseq_mask_vi_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { -; 
CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmseq.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv8i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmseq_vi_nxv1i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmseq.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv1i64.i64( - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmseq_mask_vi_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv1i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmseq_vi_nxv2i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmseq.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv2i64.i64( - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmseq_mask_vi_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmseq.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv2i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmseq_vi_nxv4i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmseq.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv4i64.i64( - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmseq_mask_vi_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmseq.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.mask.nxv4i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq.ll similarity index 84% rename from llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmseq.ll index 7184a7d..c8c223c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmseq.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs 
| FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vmseq.nxv1i8( , , - i32); + iXLen); -define @intrinsic_vmseq_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmseq_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv1i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -26,9 +28,9 @@ declare @llvm.riscv.vmseq.mask.nxv1i8( , , , - i32); + iXLen); -define @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -42,13 +44,13 @@ entry: %mask = call @llvm.riscv.vmseq.nxv1i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmseq.mask.nxv1i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -56,9 +58,9 @@ entry: declare @llvm.riscv.vmseq.nxv2i8( , , - i32); + iXLen); -define @intrinsic_vmseq_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmseq_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -68,7 +70,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv2i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -78,9 +80,9 @@ declare @llvm.riscv.vmseq.mask.nxv2i8( , , , - i32); + iXLen); -define @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -94,13 +96,13 @@ entry: %mask = call @llvm.riscv.vmseq.nxv2i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmseq.mask.nxv2i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -108,9 +110,9 @@ entry: declare @llvm.riscv.vmseq.nxv4i8( , , - i32); + iXLen); -define @intrinsic_vmseq_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmseq_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -120,7 +122,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv4i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -130,9 +132,9 @@ declare @llvm.riscv.vmseq.mask.nxv4i8( , , , - i32); + iXLen); -define @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -146,13 +148,13 @@ entry: %mask = call @llvm.riscv.vmseq.nxv4i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmseq.mask.nxv4i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -160,9 +162,9 @@ entry: declare @llvm.riscv.vmseq.nxv8i8( , , - i32); + iXLen); -define @intrinsic_vmseq_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmseq_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -172,7 +174,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv8i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -182,9 +184,9 @@ declare 
@llvm.riscv.vmseq.mask.nxv8i8( , , , - i32); + iXLen); -define @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -198,13 +200,13 @@ entry: %mask = call @llvm.riscv.vmseq.nxv8i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmseq.mask.nxv8i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -212,9 +214,9 @@ entry: declare @llvm.riscv.vmseq.nxv16i8( , , - i32); + iXLen); -define @intrinsic_vmseq_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmseq_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -224,7 +226,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv16i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -234,9 +236,9 @@ declare @llvm.riscv.vmseq.mask.nxv16i8( , , , - i32); + iXLen); -define @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -250,13 +252,13 @@ entry: %mask = call @llvm.riscv.vmseq.nxv16i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmseq.mask.nxv16i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -264,9 +266,9 @@ entry: declare @llvm.riscv.vmseq.nxv32i8( , , - i32); + iXLen); -define @intrinsic_vmseq_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmseq_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -276,7 +278,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv32i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -286,9 +288,9 @@ declare @llvm.riscv.vmseq.mask.nxv32i8( , , , - i32); + iXLen); -define @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -302,13 +304,13 @@ entry: %mask = call @llvm.riscv.vmseq.nxv32i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmseq.mask.nxv32i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -316,9 +318,9 @@ entry: declare @llvm.riscv.vmseq.nxv1i16( , , - i32); + iXLen); -define @intrinsic_vmseq_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmseq_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -328,7 +330,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv1i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -338,9 +340,9 @@ declare @llvm.riscv.vmseq.mask.nxv1i16( , , , - i32); + iXLen); -define @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -354,13 +356,13 @@ entry: %mask = call @llvm.riscv.vmseq.nxv1i16( %1, %2, - i32 %4) + iXLen 
%4) %a = call @llvm.riscv.vmseq.mask.nxv1i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -368,9 +370,9 @@ entry: declare @llvm.riscv.vmseq.nxv2i16( , , - i32); + iXLen); -define @intrinsic_vmseq_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmseq_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -380,7 +382,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv2i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -390,9 +392,9 @@ declare @llvm.riscv.vmseq.mask.nxv2i16( , , , - i32); + iXLen); -define @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -406,13 +408,13 @@ entry: %mask = call @llvm.riscv.vmseq.nxv2i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmseq.mask.nxv2i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -420,9 +422,9 @@ entry: declare @llvm.riscv.vmseq.nxv4i16( , , - i32); + iXLen); -define @intrinsic_vmseq_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmseq_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -432,7 +434,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv4i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vmseq.mask.nxv4i16( , , , - i32); + iXLen); -define @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -458,13 +460,13 @@ entry: %mask = call @llvm.riscv.vmseq.nxv4i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmseq.mask.nxv4i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -472,9 +474,9 @@ entry: declare @llvm.riscv.vmseq.nxv8i16( , , - i32); + iXLen); -define @intrinsic_vmseq_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmseq_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -484,7 +486,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv8i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -494,9 +496,9 @@ declare @llvm.riscv.vmseq.mask.nxv8i16( , , , - i32); + iXLen); -define @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -510,13 +512,13 @@ entry: %mask = call @llvm.riscv.vmseq.nxv8i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmseq.mask.nxv8i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -524,9 +526,9 @@ entry: declare @llvm.riscv.vmseq.nxv16i16( , , - i32); + iXLen); -define @intrinsic_vmseq_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmseq_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a0, e16, m4, ta, ma @@ -536,7 +538,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv16i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -546,9 +548,9 @@ declare @llvm.riscv.vmseq.mask.nxv16i16( , , , - i32); + iXLen); -define @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -562,13 +564,13 @@ entry: %mask = call @llvm.riscv.vmseq.nxv16i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmseq.mask.nxv16i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -576,9 +578,9 @@ entry: declare @llvm.riscv.vmseq.nxv1i32( , , - i32); + iXLen); -define @intrinsic_vmseq_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmseq_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -588,7 +590,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv1i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -598,9 +600,9 @@ declare @llvm.riscv.vmseq.mask.nxv1i32( , , , - i32); + iXLen); -define @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -614,13 +616,13 @@ entry: %mask = call @llvm.riscv.vmseq.nxv1i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmseq.mask.nxv1i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -628,9 +630,9 @@ entry: declare @llvm.riscv.vmseq.nxv2i32( , , - i32); + iXLen); -define @intrinsic_vmseq_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmseq_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -640,7 +642,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv2i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -650,9 +652,9 @@ declare @llvm.riscv.vmseq.mask.nxv2i32( , , , - i32); + iXLen); -define @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -666,13 +668,13 @@ entry: %mask = call @llvm.riscv.vmseq.nxv2i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmseq.mask.nxv2i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -680,9 +682,9 @@ entry: declare @llvm.riscv.vmseq.nxv4i32( , , - i32); + iXLen); -define @intrinsic_vmseq_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmseq_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -692,7 +694,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv4i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -702,9 +704,9 @@ declare @llvm.riscv.vmseq.mask.nxv4i32( , , , - i32); + iXLen); -define @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -718,13 +720,13 @@ entry: %mask = call @llvm.riscv.vmseq.nxv4i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmseq.mask.nxv4i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -732,9 +734,9 @@ entry: declare @llvm.riscv.vmseq.nxv8i32( , , - i32); + iXLen); -define @intrinsic_vmseq_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmseq_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -744,7 +746,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv8i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -754,9 +756,9 @@ declare @llvm.riscv.vmseq.mask.nxv8i32( , , , - i32); + iXLen); -define @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -770,13 +772,13 @@ entry: %mask = call @llvm.riscv.vmseq.nxv8i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmseq.mask.nxv8i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -784,9 +786,9 @@ entry: declare @llvm.riscv.vmseq.nxv1i64( , , - i32); + iXLen); -define @intrinsic_vmseq_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmseq_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -796,7 +798,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv1i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vmseq.mask.nxv1i64( , , , - i32); + iXLen); -define @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -822,13 +824,13 @@ entry: %mask = call @llvm.riscv.vmseq.nxv1i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmseq.mask.nxv1i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -836,9 +838,9 @@ entry: declare @llvm.riscv.vmseq.nxv2i64( , , - i32); + iXLen); -define @intrinsic_vmseq_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmseq_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -848,7 +850,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv2i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -858,9 +860,9 @@ declare @llvm.riscv.vmseq.mask.nxv2i64( , , , - i32); + iXLen); -define @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -874,13 +876,13 @@ entry: %mask = call @llvm.riscv.vmseq.nxv2i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmseq.mask.nxv2i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -888,9 +890,9 @@ entry: declare @llvm.riscv.vmseq.nxv4i64( , , - i32); + iXLen); -define @intrinsic_vmseq_vv_nxv4i64_nxv4i64( %0, %1, 
i32 %2) nounwind { +define @intrinsic_vmseq_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -900,7 +902,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv4i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -910,9 +912,9 @@ declare @llvm.riscv.vmseq.mask.nxv4i64( , , , - i32); + iXLen); -define @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -926,13 +928,13 @@ entry: %mask = call @llvm.riscv.vmseq.nxv4i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmseq.mask.nxv4i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -940,9 +942,9 @@ entry: declare @llvm.riscv.vmseq.nxv1i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmseq_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmseq_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -952,7 +954,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv1i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -962,9 +964,9 @@ declare @llvm.riscv.vmseq.mask.nxv1i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmseq_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -979,7 +981,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -987,9 +989,9 @@ entry: declare @llvm.riscv.vmseq.nxv2i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmseq_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmseq_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -999,7 +1001,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv2i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1009,9 +1011,9 @@ declare @llvm.riscv.vmseq.mask.nxv2i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmseq_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1026,7 +1028,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1034,9 +1036,9 @@ entry: declare @llvm.riscv.vmseq.nxv4i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmseq_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmseq_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1046,7 +1048,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv4i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1056,9 +1058,9 @@ declare @llvm.riscv.vmseq.mask.nxv4i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmseq_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1073,7 +1075,7 @@ entry: %1, i8 %2, %3, - 
i32 %4) + iXLen %4) ret %a } @@ -1081,9 +1083,9 @@ entry: declare @llvm.riscv.vmseq.nxv8i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmseq_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmseq_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -1093,7 +1095,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv8i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1103,9 +1105,9 @@ declare @llvm.riscv.vmseq.mask.nxv8i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmseq_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1120,7 +1122,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1128,9 +1130,9 @@ entry: declare @llvm.riscv.vmseq.nxv16i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmseq_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmseq_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1140,7 +1142,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv16i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1150,9 +1152,9 @@ declare @llvm.riscv.vmseq.mask.nxv16i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmseq_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1167,7 +1169,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1175,9 +1177,9 @@ entry: declare @llvm.riscv.vmseq.nxv32i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmseq_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmseq_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -1187,7 +1189,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv32i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1197,9 +1199,9 @@ declare @llvm.riscv.vmseq.mask.nxv32i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmseq_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1214,7 +1216,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vmseq.nxv1i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmseq_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmseq_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv1i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1244,9 +1246,9 @@ declare @llvm.riscv.vmseq.mask.nxv1i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmseq_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1261,7 +1263,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1269,9 +1271,9 @@ entry: declare @llvm.riscv.vmseq.nxv2i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmseq_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmseq_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -1281,7 +1283,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv2i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1291,9 +1293,9 @@ declare @llvm.riscv.vmseq.mask.nxv2i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmseq_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1308,7 +1310,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1316,9 +1318,9 @@ entry: declare @llvm.riscv.vmseq.nxv4i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmseq_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmseq_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1328,7 +1330,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv4i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1338,9 +1340,9 @@ declare @llvm.riscv.vmseq.mask.nxv4i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmseq_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1355,7 +1357,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1363,9 +1365,9 @@ entry: declare @llvm.riscv.vmseq.nxv8i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmseq_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmseq_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1375,7 +1377,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv8i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1385,9 +1387,9 @@ declare @llvm.riscv.vmseq.mask.nxv8i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmseq_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1402,7 +1404,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1410,9 +1412,9 @@ entry: declare @llvm.riscv.vmseq.nxv16i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmseq_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmseq_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -1422,7 +1424,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv16i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1432,9 +1434,9 @@ declare @llvm.riscv.vmseq.mask.nxv16i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmseq_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define 
@intrinsic_vmseq_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1449,7 +1451,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1457,9 +1459,9 @@ entry: declare @llvm.riscv.vmseq.nxv1i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmseq_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmseq_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1469,7 +1471,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv1i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1479,9 +1481,9 @@ declare @llvm.riscv.vmseq.mask.nxv1i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmseq_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1496,7 +1498,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1504,9 +1506,9 @@ entry: declare @llvm.riscv.vmseq.nxv2i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmseq_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmseq_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1516,7 +1518,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv2i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1526,9 +1528,9 @@ declare @llvm.riscv.vmseq.mask.nxv2i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmseq_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1543,7 +1545,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1551,9 +1553,9 @@ entry: declare @llvm.riscv.vmseq.nxv4i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmseq_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmseq_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1563,7 +1565,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv4i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1573,9 +1575,9 @@ declare @llvm.riscv.vmseq.mask.nxv4i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmseq_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1590,7 +1592,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1598,9 +1600,9 @@ entry: declare @llvm.riscv.vmseq.nxv8i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmseq_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmseq_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1610,7 +1612,7 @@ entry: %a = call @llvm.riscv.vmseq.nxv8i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1620,9 +1622,9 @@ declare 
@llvm.riscv.vmseq.mask.nxv8i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmseq_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmseq_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1637,7 +1639,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1645,25 +1647,31 @@ entry: declare @llvm.riscv.vmseq.nxv1i64.i64( , i64, - i32); - -define @intrinsic_vmseq_vx_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vmseq.vv v0, v8, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmseq_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmseq_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vmseq.vv v0, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmseq_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmseq.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmseq.nxv1i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1673,30 +1681,39 @@ declare @llvm.riscv.vmseq.mask.nxv1i64.i64( , i64, , - i32); - -define @intrinsic_vmseq_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v11, (a0), zero -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmseq.vv v10, v8, v11, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmseq_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v11, (a0), zero +; RV32-NEXT: vmv1r.v v10, v0 +; RV32-NEXT: vmv1r.v v0, v9 +; RV32-NEXT: vmseq.vv v10, v8, v11, v0.t +; RV32-NEXT: vmv.v.v v0, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v10, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vmv1r.v v0, v9 +; RV64-NEXT: vmseq.vx v10, v8, a0, v0.t +; RV64-NEXT: vmv.v.v v0, v10 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmseq.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1704,25 +1721,31 @@ entry: declare @llvm.riscv.vmseq.nxv2i64.i64( , i64, - i32); - -define @intrinsic_vmseq_vx_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; 
CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vmseq.vv v0, v8, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmseq_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmseq_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vmseq.vv v0, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmseq_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmseq.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmseq.nxv2i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1732,30 +1755,39 @@ declare @llvm.riscv.vmseq.mask.nxv2i64.i64( , i64, , - i32); - -define @intrinsic_vmseq_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmseq.vv v11, v8, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmseq_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmv1r.v v11, v0 +; RV32-NEXT: vmv1r.v v0, v10 +; RV32-NEXT: vmseq.vv v11, v8, v12, v0.t +; RV32-NEXT: vmv1r.v v0, v11 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v11, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vmv1r.v v0, v10 +; RV64-NEXT: vmseq.vx v11, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v11 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmseq.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1763,25 +1795,31 @@ entry: declare @llvm.riscv.vmseq.nxv4i64.i64( , i64, - i32); - -define @intrinsic_vmseq_vx_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmseq.vv v0, v8, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmseq_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmseq_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmseq.vv v0, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: 
intrinsic_vmseq_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmseq.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmseq.nxv4i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1791,35 +1829,44 @@ declare @llvm.riscv.vmseq.mask.nxv4i64.i64( , i64, , - i32); - -define @intrinsic_vmseq_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmseq.vv v13, v8, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmseq_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmv1r.v v13, v0 +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: vmseq.vv v13, v8, v16, v0.t +; RV32-NEXT: vmv1r.v v0, v13 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v13, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: vmseq.vx v13, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v13 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmseq.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } -define @intrinsic_vmseq_vi_nxv1i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmseq_vi_nxv1i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -1829,12 +1876,12 @@ entry: %a = call @llvm.riscv.vmseq.nxv1i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmseq_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmseq_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1849,12 +1896,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmseq_vi_nxv2i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmseq_vi_nxv2i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -1864,12 +1911,12 @@ entry: %a = call @llvm.riscv.vmseq.nxv2i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmseq_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmseq_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1884,12 +1931,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmseq_vi_nxv4i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmseq_vi_nxv4i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -1899,12 +1946,12 @@ entry: %a = call @llvm.riscv.vmseq.nxv4i8.i8( %0, i8 9, - 
i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmseq_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmseq_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1919,12 +1966,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmseq_vi_nxv8i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmseq_vi_nxv8i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -1934,12 +1981,12 @@ entry: %a = call @llvm.riscv.vmseq.nxv8i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmseq_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmseq_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1954,12 +2001,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmseq_vi_nxv16i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmseq_vi_nxv16i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -1969,12 +2016,12 @@ entry: %a = call @llvm.riscv.vmseq.nxv16i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmseq_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmseq_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1989,12 +2036,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmseq_vi_nxv32i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmseq_vi_nxv32i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -2004,12 +2051,12 @@ entry: %a = call @llvm.riscv.vmseq.nxv32i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmseq_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmseq_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2024,12 +2071,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmseq_vi_nxv1i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmseq_vi_nxv1i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -2039,12 +2086,12 @@ entry: %a = call @llvm.riscv.vmseq.nxv1i16.i16( %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmseq_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmseq_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2059,12 +2106,12 @@ entry: %1, i16 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmseq_vi_nxv2i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmseq_vi_nxv2i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -2074,12 +2121,12 @@ entry: %a = call @llvm.riscv.vmseq.nxv2i16.i16( %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define 
@intrinsic_vmseq_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmseq_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2094,12 +2141,12 @@ entry: %1, i16 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmseq_vi_nxv4i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmseq_vi_nxv4i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -2109,12 +2156,12 @@ entry: %a = call @llvm.riscv.vmseq.nxv4i16.i16( %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmseq_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmseq_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2129,12 +2176,12 @@ entry: %1, i16 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmseq_vi_nxv8i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmseq_vi_nxv8i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -2144,12 +2191,12 @@ entry: %a = call @llvm.riscv.vmseq.nxv8i16.i16( %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmseq_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmseq_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2164,12 +2211,12 @@ entry: %1, i16 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmseq_vi_nxv16i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmseq_vi_nxv16i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -2179,12 +2226,12 @@ entry: %a = call @llvm.riscv.vmseq.nxv16i16.i16( %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmseq_mask_vi_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmseq_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2199,12 +2246,12 @@ entry: %1, i16 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmseq_vi_nxv1i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmseq_vi_nxv1i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -2214,12 +2261,12 @@ entry: %a = call @llvm.riscv.vmseq.nxv1i32.i32( %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmseq_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmseq_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2234,12 +2281,12 @@ entry: %1, i32 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmseq_vi_nxv2i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmseq_vi_nxv2i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -2249,12 +2296,12 @@ entry: %a = call @llvm.riscv.vmseq.nxv2i32.i32( %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define 
@intrinsic_vmseq_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmseq_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2269,12 +2316,12 @@ entry: %1, i32 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmseq_vi_nxv4i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmseq_vi_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -2284,12 +2331,12 @@ entry: %a = call @llvm.riscv.vmseq.nxv4i32.i32( %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmseq_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmseq_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2304,12 +2351,12 @@ entry: %1, i32 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmseq_vi_nxv8i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmseq_vi_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -2319,12 +2366,12 @@ entry: %a = call @llvm.riscv.vmseq.nxv8i32.i32( %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmseq_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmseq_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2339,12 +2386,12 @@ entry: %1, i32 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmseq_vi_nxv1i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmseq_vi_nxv1i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -2354,12 +2401,12 @@ entry: %a = call @llvm.riscv.vmseq.nxv1i64.i64( %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmseq_mask_vi_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmseq_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2374,12 +2421,12 @@ entry: %1, i64 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmseq_vi_nxv2i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmseq_vi_nxv2i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -2389,12 +2436,12 @@ entry: %a = call @llvm.riscv.vmseq.nxv2i64.i64( %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmseq_mask_vi_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmseq_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2409,12 +2456,12 @@ entry: %1, i64 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmseq_vi_nxv4i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmseq_vi_nxv4i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -2424,12 +2471,12 @@ entry: %a = call @llvm.riscv.vmseq.nxv4i64.i64( %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define 
@intrinsic_vmseq_mask_vi_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmseq_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2444,7 +2491,7 @@ entry: %1, i64 9, %2, - i32 %3) + iXLen %3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll deleted file mode 100644 index ad067cf..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll +++ /dev/null @@ -1,2757 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmsge.nxv1i8( - , - , - i64); - -define @intrinsic_vmsge_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmsle.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv1i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv1i8( - , - , - , - , - i64); - -define @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmsle.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsge.nxv1i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsge.mask.nxv1i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv2i8( - , - , - i64); - -define @intrinsic_vmsge_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmsle.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv2i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv2i8( - , - , - , - , - i64); - -define @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmsle.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsge.nxv2i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsge.mask.nxv2i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv4i8( - , - , - i64); - -define @intrinsic_vmsge_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmsle.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv4i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv4i8( - , - , - , - , - i64); - -define @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmsle.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; 
CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsge.nxv4i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsge.mask.nxv4i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv8i8( - , - , - i64); - -define @intrinsic_vmsge_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmsle.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv8i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv8i8( - , - , - , - , - i64); - -define @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmsle.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsge.nxv8i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsge.mask.nxv8i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv16i8( - , - , - i64); - -define @intrinsic_vmsge_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmsle.vv v0, v10, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv16i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv16i8( - , - , - , - , - i64); - -define @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmsle.vv v14, v10, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsge.nxv16i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsge.mask.nxv16i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv32i8( - , - , - i64); - -define @intrinsic_vmsge_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vv_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmsle.vv v0, v12, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv32i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv32i8( - , - , - , - , - i64); - -define @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmsle.vv v20, v12, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsge.nxv32i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsge.mask.nxv32i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv1i16( - , - , - i64); - -define @intrinsic_vmsge_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmsle.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv1i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv1i16( - , - , - , - , - i64); - -define @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmsle.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsge.nxv1i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsge.mask.nxv1i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv2i16( - , - , - i64); - -define @intrinsic_vmsge_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmsle.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv2i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv2i16( - , - , - , - , - i64); - -define @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmsle.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsge.nxv2i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsge.mask.nxv2i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv4i16( - , - , - i64); - -define @intrinsic_vmsge_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmsle.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv4i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv4i16( - , - , - , - , - i64); - -define @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmsle.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsge.nxv4i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsge.mask.nxv4i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv8i16( - , - , - i64); - -define @intrinsic_vmsge_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmsle.vv v0, v10, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv8i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv8i16( - , - , - , - , - i64); - -define @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmsle.vv v14, v10, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsge.nxv8i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsge.mask.nxv8i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv16i16( - , - , - i64); - -define @intrinsic_vmsge_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmsle.vv v0, v12, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv16i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv16i16( - , - , - , - , - i64); - -define @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmsle.vv v20, v12, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsge.nxv16i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsge.mask.nxv16i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv1i32( - , - , - i64); - -define @intrinsic_vmsge_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmsle.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv1i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv1i32( - , - , - , - , - i64); - -define @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmsle.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsge.nxv1i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsge.mask.nxv1i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv2i32( - , - , - i64); - -define @intrinsic_vmsge_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmsle.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv2i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv2i32( - , - , - , - , - i64); - -define @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmsle.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsge.nxv2i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsge.mask.nxv2i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare 
@llvm.riscv.vmsge.nxv4i32( - , - , - i64); - -define @intrinsic_vmsge_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmsle.vv v0, v10, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv4i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv4i32( - , - , - , - , - i64); - -define @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmsle.vv v14, v10, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsge.nxv4i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsge.mask.nxv4i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv8i32( - , - , - i64); - -define @intrinsic_vmsge_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmsle.vv v0, v12, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv8i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv8i32( - , - , - , - , - i64); - -define @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmsle.vv v20, v12, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsge.nxv8i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsge.mask.nxv8i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv1i64( - , - , - i64); - -define @intrinsic_vmsge_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmsle.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv1i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv1i64( - , - , - , - , - i64); - -define @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmsle.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsge.nxv1i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsge.mask.nxv1i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv2i64( - , - , - i64); - -define @intrinsic_vmsge_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmsle.vv v0, v10, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv2i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv2i64( - , - , - 
, - , - i64); - -define @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmsle.vv v14, v10, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsge.nxv2i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsge.mask.nxv2i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv4i64( - , - , - i64); - -define @intrinsic_vmsge_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmsle.vv v0, v12, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv4i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv4i64( - , - , - , - , - i64); - -define @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmsle.vv v20, v12, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsge.nxv4i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsge.mask.nxv4i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv1i8.i8( - , - i8, - i64); - -define @intrinsic_vmsge_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vmslt.vx v8, v8, a0 -; CHECK-NEXT: vmnot.m v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv1i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv1i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsge_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv2i8.i8( - , - i8, - i64); - -define @intrinsic_vmsge_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vmslt.vx v8, v8, a0 -; CHECK-NEXT: vmnot.m v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv2i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv2i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsge_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i64 
%4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv4i8.i8( - , - i8, - i64); - -define @intrinsic_vmsge_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vmslt.vx v8, v8, a0 -; CHECK-NEXT: vmnot.m v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv4i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv4i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsge_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv8i8.i8( - , - i8, - i64); - -define @intrinsic_vmsge_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vmslt.vx v8, v8, a0 -; CHECK-NEXT: vmnot.m v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv8i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv8i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsge_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv16i8.i8( - , - i8, - i64); - -define @intrinsic_vmsge_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vmslt.vx v10, v8, a0 -; CHECK-NEXT: vmnot.m v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv16i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv16i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsge_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v11, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv32i8.i8( - , - i8, - i64); - -define @intrinsic_vmsge_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vmslt.vx v12, v8, a0 -; CHECK-NEXT: vmnot.m v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv32i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv32i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsge_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vmsge_mask_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v13, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv1i16.i16( - , - i16, - i64); - -define @intrinsic_vmsge_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vmslt.vx v8, v8, a0 -; CHECK-NEXT: vmnot.m v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv1i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv1i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsge_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv2i16.i16( - , - i16, - i64); - -define @intrinsic_vmsge_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vmslt.vx v8, v8, a0 -; CHECK-NEXT: vmnot.m v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv2i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv2i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsge_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv4i16.i16( - , - i16, - i64); - -define @intrinsic_vmsge_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vmslt.vx v8, v8, a0 -; CHECK-NEXT: vmnot.m v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv4i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv4i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsge_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv8i16.i16( - , - i16, - i64); - -define @intrinsic_vmsge_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i16_i16: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vmslt.vx v10, v8, a0 -; CHECK-NEXT: vmnot.m v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv8i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv8i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsge_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v11, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv16i16.i16( - , - i16, - i64); - -define @intrinsic_vmsge_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vmslt.vx v12, v8, a0 -; CHECK-NEXT: vmnot.m v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv16i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv16i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsge_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v13, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv1i32.i32( - , - i32, - i64); - -define @intrinsic_vmsge_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vmslt.vx v8, v8, a0 -; CHECK-NEXT: vmnot.m v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv1i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv1i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsge_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv2i32.i32( - , - i32, - i64); - -define @intrinsic_vmsge_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vmslt.vx v8, v8, a0 -; CHECK-NEXT: vmnot.m v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv2i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv2i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsge_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, 
mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv4i32.i32( - , - i32, - i64); - -define @intrinsic_vmsge_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vmslt.vx v10, v8, a0 -; CHECK-NEXT: vmnot.m v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv4i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv4i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsge_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v11, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv8i32.i32( - , - i32, - i64); - -define @intrinsic_vmsge_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vmslt.vx v12, v8, a0 -; CHECK-NEXT: vmnot.m v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv8i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv8i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsge_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v13, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv1i64.i64( - , - i64, - i64); - -define @intrinsic_vmsge_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vmslt.vx v8, v8, a0 -; CHECK-NEXT: vmnot.m v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv1i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv1i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmsge_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv2i64.i64( - , - i64, - i64); - -define @intrinsic_vmsge_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vmslt.vx v10, v8, a0 -; CHECK-NEXT: vmnot.m v0, v10 -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv2i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv2i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmsge_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v11, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsge.nxv4i64.i64( - , - i64, - i64); - -define @intrinsic_vmsge_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vmslt.vx v12, v8, a0 -; CHECK-NEXT: vmnot.m v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv4i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsge.mask.nxv4i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmsge_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v13, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -define @intrinsic_vmsge_vi_nxv1i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, -16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv1i8.i8( - %0, - i8 -15, - i64 %1) - - ret %a -} - -define @intrinsic_vmsge_mask_vi_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vi v10, v8, -15, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv1i8.i8( - %0, - %1, - i8 -14, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_vi_nxv2i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, -14 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv2i8.i8( - %0, - i8 -13, - i64 %1) - - ret %a -} - -define @intrinsic_vmsge_mask_vi_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vi v10, v8, -13, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv2i8.i8( - %0, - %1, - i8 -12, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_vi_nxv4i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, -12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv4i8.i8( - %0, - 
i8 -11, - i64 %1) - - ret %a -} - -define @intrinsic_vmsge_mask_vi_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vi v10, v8, -11, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv4i8.i8( - %0, - %1, - i8 -10, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_vi_nxv8i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, -10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv8i8.i8( - %0, - i8 -9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsge_mask_vi_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vi v10, v8, -9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv8i8.i8( - %0, - %1, - i8 -8, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_vi_nxv16i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, -8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv16i8.i8( - %0, - i8 -7, - i64 %1) - - ret %a -} - -define @intrinsic_vmsge_mask_vi_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgt.vi v11, v8, -7, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv16i8.i8( - %0, - %1, - i8 -6, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_vi_nxv32i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vi_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, -6 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv32i8.i8( - %0, - i8 -5, - i64 %1) - - ret %a -} - -define @intrinsic_vmsge_mask_vi_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgt.vi v13, v8, -5, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv32i8.i8( - %0, - %1, - i8 -4, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_vi_nxv1i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, -4 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv1i16.i16( - %0, - i16 -3, - i64 %1) - - ret %a -} - -define @intrinsic_vmsge_mask_vi_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vi v10, v8, -3, v0.t -; CHECK-NEXT: vmv1r.v 
v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv1i16.i16( - %0, - %1, - i16 -2, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_vi_nxv2i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, -2 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv2i16.i16( - %0, - i16 -1, - i64 %1) - - ret %a -} - -define @intrinsic_vmsge_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vi v10, v8, -1, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv2i16.i16( - %0, - %1, - i16 0, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_vi_nxv4i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, -1 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv4i16.i16( - %0, - i16 0, - i64 %1) - - ret %a -} - -define @intrinsic_vmsge_mask_vi_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vi v10, v8, 0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv4i16.i16( - %0, - %1, - i16 1, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_vi_nxv8i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 1 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv8i16.i16( - %0, - i16 2, - i64 %1) - - ret %a -} - -define @intrinsic_vmsge_mask_vi_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgt.vi v11, v8, 2, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv8i16.i16( - %0, - %1, - i16 3, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_vi_nxv16i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 3 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv16i16.i16( - %0, - i16 4, - i64 %1) - - ret %a -} - -define @intrinsic_vmsge_mask_vi_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgt.vi v13, v8, 4, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv16i16.i16( - %0, - %1, - i16 5, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_vi_nxv1i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; 
CHECK-NEXT: vmsgt.vi v0, v8, 5 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv1i32.i32( - %0, - i32 6, - i64 %1) - - ret %a -} - -define @intrinsic_vmsge_mask_vi_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vi v10, v8, 6, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv1i32.i32( - %0, - %1, - i32 7, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_vi_nxv2i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 7 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv2i32.i32( - %0, - i32 8, - i64 %1) - - ret %a -} - -define @intrinsic_vmsge_mask_vi_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vi v10, v8, 8, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv2i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_vi_nxv4i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv4i32.i32( - %0, - i32 10, - i64 %1) - - ret %a -} - -define @intrinsic_vmsge_mask_vi_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgt.vi v11, v8, 10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv4i32.i32( - %0, - %1, - i32 11, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_vi_nxv8i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv8i32.i32( - %0, - i32 12, - i64 %1) - - ret %a -} - -define @intrinsic_vmsge_mask_vi_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgt.vi v13, v8, 12, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv8i32.i32( - %0, - %1, - i32 13, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_vi_nxv1i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv1i64.i64( - %0, - i64 14, - i64 %1) - - ret %a -} - -define @intrinsic_vmsge_mask_vi_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; 
CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vi v10, v8, 14, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv1i64.i64( - %0, - %1, - i64 15, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_vi_nxv2i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 15 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv2i64.i64( - %0, - i64 16, - i64 %1) - - ret %a -} - -define @intrinsic_vmsge_mask_vi_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgt.vi v11, v8, -16, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv2i64.i64( - %0, - %1, - i64 -15, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_vi_nxv4i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, -15 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.nxv4i64.i64( - %0, - i64 -14, - i64 %1) - - ret %a -} - -define @intrinsic_vmsge_mask_vi_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgt.vi v13, v8, -14, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv4i64.i64( - %0, - %1, - i64 -13, - %2, - i64 %3) - - ret %a -} - -; Test cases where the mask and maskedoff are the same value. 
-define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8( %0, %1, i8 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vmslt.vx v8, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8( %0, %1, i8 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vmslt.vx v8, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8( %0, %1, i8 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vmslt.vx v8, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8( %0, %1, i8 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vmslt.vx v8, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8( %0, %1, i8 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vmslt.vx v10, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8( %0, %1, i8 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vmslt.vx v12, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16( %0, %1, i16 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vmslt.vx v8, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16( %0, %1, i16 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vmslt.vx v8, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16( %0, %1, i16 %2, i64 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vmslt.vx v8, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16( %0, %1, i16 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vmslt.vx v10, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16( %0, %1, i16 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vmslt.vx v12, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32( %0, %1, i32 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vmslt.vx v8, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32( %0, %1, i32 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vmslt.vx v8, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32( %0, %1, i32 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vmslt.vx v10, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32( %0, %1, i32 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vmslt.vx v12, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64( %0, %1, i64 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vmslt.vx v8, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64( %0, %1, i64 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vmslt.vx v10, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64( %0, %1, i64 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vmslt.vx v12, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsge.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %0, - i64 %3) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge.ll similarity index 82% rename from llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmsge.ll index 2a34f94..3e0c83e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsge.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vmsge.nxv1i8( , , - i32); + iXLen); -define @intrinsic_vmsge_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsge_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv1i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -26,9 +28,9 @@ declare @llvm.riscv.vmsge.mask.nxv1i8( , , , - i32); + iXLen); -define @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -42,13 +44,13 @@ entry: %mask = call @llvm.riscv.vmsge.nxv1i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsge.mask.nxv1i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -56,9 +58,9 @@ entry: declare @llvm.riscv.vmsge.nxv2i8( , , - i32); + iXLen); -define @intrinsic_vmsge_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsge_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -68,7 +70,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv2i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -78,9 +80,9 @@ declare @llvm.riscv.vmsge.mask.nxv2i8( , , , - i32); + iXLen); -define @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -94,13 +96,13 @@ entry: %mask = call @llvm.riscv.vmsge.nxv2i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsge.mask.nxv2i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -108,9 +110,9 @@ entry: declare @llvm.riscv.vmsge.nxv4i8( , , - i32); + iXLen); 
-define @intrinsic_vmsge_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsge_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -120,7 +122,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv4i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -130,9 +132,9 @@ declare @llvm.riscv.vmsge.mask.nxv4i8( , , , - i32); + iXLen); -define @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -146,13 +148,13 @@ entry: %mask = call @llvm.riscv.vmsge.nxv4i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsge.mask.nxv4i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -160,9 +162,9 @@ entry: declare @llvm.riscv.vmsge.nxv8i8( , , - i32); + iXLen); -define @intrinsic_vmsge_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsge_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -172,7 +174,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv8i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -182,9 +184,9 @@ declare @llvm.riscv.vmsge.mask.nxv8i8( , , , - i32); + iXLen); -define @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -198,13 +200,13 @@ entry: %mask = call @llvm.riscv.vmsge.nxv8i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsge.mask.nxv8i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -212,9 +214,9 @@ entry: declare @llvm.riscv.vmsge.nxv16i8( , , - i32); + iXLen); -define @intrinsic_vmsge_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsge_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -224,7 +226,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv16i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -234,9 +236,9 @@ declare @llvm.riscv.vmsge.mask.nxv16i8( , , , - i32); + iXLen); -define @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -250,13 +252,13 @@ entry: %mask = call @llvm.riscv.vmsge.nxv16i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsge.mask.nxv16i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -264,9 +266,9 @@ entry: declare @llvm.riscv.vmsge.nxv32i8( , , - i32); + iXLen); -define @intrinsic_vmsge_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsge_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -276,7 +278,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv32i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -286,9 +288,9 @@ declare @llvm.riscv.vmsge.mask.nxv32i8( , , , - i32); + iXLen); -define 
@intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -302,13 +304,13 @@ entry: %mask = call @llvm.riscv.vmsge.nxv32i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsge.mask.nxv32i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -316,9 +318,9 @@ entry: declare @llvm.riscv.vmsge.nxv1i16( , , - i32); + iXLen); -define @intrinsic_vmsge_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsge_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -328,7 +330,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv1i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -338,9 +340,9 @@ declare @llvm.riscv.vmsge.mask.nxv1i16( , , , - i32); + iXLen); -define @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -354,13 +356,13 @@ entry: %mask = call @llvm.riscv.vmsge.nxv1i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsge.mask.nxv1i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -368,9 +370,9 @@ entry: declare @llvm.riscv.vmsge.nxv2i16( , , - i32); + iXLen); -define @intrinsic_vmsge_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsge_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -380,7 +382,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv2i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -390,9 +392,9 @@ declare @llvm.riscv.vmsge.mask.nxv2i16( , , , - i32); + iXLen); -define @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -406,13 +408,13 @@ entry: %mask = call @llvm.riscv.vmsge.nxv2i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsge.mask.nxv2i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -420,9 +422,9 @@ entry: declare @llvm.riscv.vmsge.nxv4i16( , , - i32); + iXLen); -define @intrinsic_vmsge_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsge_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -432,7 +434,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv4i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vmsge.mask.nxv4i16( , , , - i32); + iXLen); -define @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -458,13 +460,13 @@ entry: %mask = call @llvm.riscv.vmsge.nxv4i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsge.mask.nxv4i16( %0, 
%2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -472,9 +474,9 @@ entry: declare @llvm.riscv.vmsge.nxv8i16( , , - i32); + iXLen); -define @intrinsic_vmsge_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsge_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -484,7 +486,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv8i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -494,9 +496,9 @@ declare @llvm.riscv.vmsge.mask.nxv8i16( , , , - i32); + iXLen); -define @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -510,13 +512,13 @@ entry: %mask = call @llvm.riscv.vmsge.nxv8i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsge.mask.nxv8i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -524,9 +526,9 @@ entry: declare @llvm.riscv.vmsge.nxv16i16( , , - i32); + iXLen); -define @intrinsic_vmsge_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsge_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -536,7 +538,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv16i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -546,9 +548,9 @@ declare @llvm.riscv.vmsge.mask.nxv16i16( , , , - i32); + iXLen); -define @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -562,13 +564,13 @@ entry: %mask = call @llvm.riscv.vmsge.nxv16i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsge.mask.nxv16i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -576,9 +578,9 @@ entry: declare @llvm.riscv.vmsge.nxv1i32( , , - i32); + iXLen); -define @intrinsic_vmsge_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsge_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -588,7 +590,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv1i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -598,9 +600,9 @@ declare @llvm.riscv.vmsge.mask.nxv1i32( , , , - i32); + iXLen); -define @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -614,13 +616,13 @@ entry: %mask = call @llvm.riscv.vmsge.nxv1i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsge.mask.nxv1i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -628,9 +630,9 @@ entry: declare @llvm.riscv.vmsge.nxv2i32( , , - i32); + iXLen); -define @intrinsic_vmsge_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsge_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -640,7 +642,7 @@ 
entry: %a = call @llvm.riscv.vmsge.nxv2i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -650,9 +652,9 @@ declare @llvm.riscv.vmsge.mask.nxv2i32( , , , - i32); + iXLen); -define @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -666,13 +668,13 @@ entry: %mask = call @llvm.riscv.vmsge.nxv2i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsge.mask.nxv2i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -680,9 +682,9 @@ entry: declare @llvm.riscv.vmsge.nxv4i32( , , - i32); + iXLen); -define @intrinsic_vmsge_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsge_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -692,7 +694,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv4i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -702,9 +704,9 @@ declare @llvm.riscv.vmsge.mask.nxv4i32( , , , - i32); + iXLen); -define @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -718,13 +720,13 @@ entry: %mask = call @llvm.riscv.vmsge.nxv4i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsge.mask.nxv4i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -732,9 +734,9 @@ entry: declare @llvm.riscv.vmsge.nxv8i32( , , - i32); + iXLen); -define @intrinsic_vmsge_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsge_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -744,7 +746,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv8i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -754,9 +756,9 @@ declare @llvm.riscv.vmsge.mask.nxv8i32( , , , - i32); + iXLen); -define @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -770,13 +772,13 @@ entry: %mask = call @llvm.riscv.vmsge.nxv8i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsge.mask.nxv8i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -784,9 +786,9 @@ entry: declare @llvm.riscv.vmsge.nxv1i64( , , - i32); + iXLen); -define @intrinsic_vmsge_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsge_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -796,7 +798,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv1i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vmsge.mask.nxv1i64( , , , - i32); + iXLen); -define @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a0, e64, m1, ta, mu @@ -822,13 +824,13 @@ entry: %mask = call @llvm.riscv.vmsge.nxv1i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsge.mask.nxv1i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -836,9 +838,9 @@ entry: declare @llvm.riscv.vmsge.nxv2i64( , , - i32); + iXLen); -define @intrinsic_vmsge_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsge_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -848,7 +850,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv2i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -858,9 +860,9 @@ declare @llvm.riscv.vmsge.mask.nxv2i64( , , , - i32); + iXLen); -define @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -874,13 +876,13 @@ entry: %mask = call @llvm.riscv.vmsge.nxv2i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsge.mask.nxv2i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -888,9 +890,9 @@ entry: declare @llvm.riscv.vmsge.nxv4i64( , , - i32); + iXLen); -define @intrinsic_vmsge_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsge_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -900,7 +902,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv4i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -910,9 +912,9 @@ declare @llvm.riscv.vmsge.mask.nxv4i64( , , , - i32); + iXLen); -define @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -926,13 +928,13 @@ entry: %mask = call @llvm.riscv.vmsge.nxv4i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsge.mask.nxv4i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -940,9 +942,9 @@ entry: declare @llvm.riscv.vmsge.nxv1i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsge_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsge_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -953,7 +955,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv1i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -963,9 +965,9 @@ declare @llvm.riscv.vmsge.mask.nxv1i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsge_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -980,7 +982,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -988,9 +990,9 @@ entry: declare @llvm.riscv.vmsge.nxv2i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsge_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsge_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -1001,7 +1003,7 @@ 
entry: %a = call @llvm.riscv.vmsge.nxv2i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1011,9 +1013,9 @@ declare @llvm.riscv.vmsge.mask.nxv2i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsge_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1028,7 +1030,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1036,9 +1038,9 @@ entry: declare @llvm.riscv.vmsge.nxv4i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsge_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsge_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1049,7 +1051,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv4i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1059,9 +1061,9 @@ declare @llvm.riscv.vmsge.mask.nxv4i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsge_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1076,7 +1078,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1084,9 +1086,9 @@ entry: declare @llvm.riscv.vmsge.nxv8i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsge_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsge_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -1097,7 +1099,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv8i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1107,9 +1109,9 @@ declare @llvm.riscv.vmsge.mask.nxv8i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsge_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1124,7 +1126,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1132,9 +1134,9 @@ entry: declare @llvm.riscv.vmsge.nxv16i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsge_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsge_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1145,7 +1147,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv16i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1155,9 +1157,9 @@ declare @llvm.riscv.vmsge.mask.nxv16i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsge_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1172,7 +1174,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1180,9 +1182,9 @@ entry: declare @llvm.riscv.vmsge.nxv32i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsge_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsge_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a1, e8, m4, ta, ma @@ -1193,7 +1195,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv32i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1203,9 +1205,9 @@ declare @llvm.riscv.vmsge.mask.nxv32i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsge_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1220,7 +1222,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1228,9 +1230,9 @@ entry: declare @llvm.riscv.vmsge.nxv1i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsge_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsge_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -1241,7 +1243,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv1i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1251,9 +1253,9 @@ declare @llvm.riscv.vmsge.mask.nxv1i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsge_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1268,7 +1270,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1276,9 +1278,9 @@ entry: declare @llvm.riscv.vmsge.nxv2i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsge_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsge_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -1289,7 +1291,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv2i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1299,9 +1301,9 @@ declare @llvm.riscv.vmsge.mask.nxv2i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsge_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1316,7 +1318,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1324,9 +1326,9 @@ entry: declare @llvm.riscv.vmsge.nxv4i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsge_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsge_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1337,7 +1339,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv4i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1347,9 +1349,9 @@ declare @llvm.riscv.vmsge.mask.nxv4i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsge_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1364,7 +1366,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1372,9 +1374,9 @@ entry: declare @llvm.riscv.vmsge.nxv8i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsge_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +define 
@intrinsic_vmsge_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1385,7 +1387,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv8i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1395,9 +1397,9 @@ declare @llvm.riscv.vmsge.mask.nxv8i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsge_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1412,7 +1414,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1420,9 +1422,9 @@ entry: declare @llvm.riscv.vmsge.nxv16i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsge_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsge_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -1433,7 +1435,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv16i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1443,9 +1445,9 @@ declare @llvm.riscv.vmsge.mask.nxv16i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsge_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1460,7 +1462,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1468,9 +1470,9 @@ entry: declare @llvm.riscv.vmsge.nxv1i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsge_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsge_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1481,7 +1483,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv1i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1491,9 +1493,9 @@ declare @llvm.riscv.vmsge.mask.nxv1i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsge_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1508,7 +1510,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1516,9 +1518,9 @@ entry: declare @llvm.riscv.vmsge.nxv2i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsge_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsge_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1529,7 +1531,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv2i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1539,9 +1541,9 @@ declare @llvm.riscv.vmsge.mask.nxv2i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsge_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1556,7 +1558,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1564,9 +1566,9 @@ 
entry: declare @llvm.riscv.vmsge.nxv4i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsge_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsge_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1577,7 +1579,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv4i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1587,9 +1589,9 @@ declare @llvm.riscv.vmsge.mask.nxv4i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsge_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1604,7 +1606,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1612,9 +1614,9 @@ entry: declare @llvm.riscv.vmsge.nxv8i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsge_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsge_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1625,7 +1627,7 @@ entry: %a = call @llvm.riscv.vmsge.nxv8i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1635,9 +1637,9 @@ declare @llvm.riscv.vmsge.mask.nxv8i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsge_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsge_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1652,7 +1654,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1660,25 +1662,32 @@ entry: declare @llvm.riscv.vmsge.nxv1i64.i64( , i64, - i32); - -define @intrinsic_vmsge_vx_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vmsle.vv v0, v9, v8 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsge_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsge_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vmsle.vv v0, v9, v8 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsge_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmslt.vx v8, v8, a0 +; RV64-NEXT: vmnot.m v0, v8 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsge.nxv1i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1688,30 +1697,39 @@ declare @llvm.riscv.vmsge.mask.nxv1i64.i64( , i64, , - i32); - -define @intrinsic_vmsge_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v11, (a0), zero 
-; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vv v10, v11, v8, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsge_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v11, (a0), zero +; RV32-NEXT: vmv1r.v v10, v0 +; RV32-NEXT: vmv1r.v v0, v9 +; RV32-NEXT: vmsle.vv v10, v11, v8, v0.t +; RV32-NEXT: vmv.v.v v0, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v10, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vmv1r.v v0, v9 +; RV64-NEXT: vmslt.vx v10, v8, a0, v0.t +; RV64-NEXT: vmxor.mm v0, v10, v9 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsge.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1719,25 +1737,32 @@ entry: declare @llvm.riscv.vmsge.nxv2i64.i64( , i64, - i32); - -define @intrinsic_vmsge_vx_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vmsle.vv v0, v10, v8 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsge_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsge_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vmsle.vv v0, v10, v8 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsge_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmslt.vx v10, v8, a0 +; RV64-NEXT: vmnot.m v0, v10 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsge.nxv2i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1747,30 +1772,39 @@ declare @llvm.riscv.vmsge.mask.nxv2i64.i64( , i64, , - i32); - -define @intrinsic_vmsge_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsle.vv v11, v12, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsge_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmv1r.v v11, v0 +; RV32-NEXT: vmv1r.v v0, v10 +; RV32-NEXT: vmsle.vv v11, v12, v8, v0.t +; RV32-NEXT: vmv1r.v 
v0, v11 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v11, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vmv1r.v v0, v10 +; RV64-NEXT: vmslt.vx v11, v8, a0, v0.t +; RV64-NEXT: vmxor.mm v0, v11, v10 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsge.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1778,25 +1812,32 @@ entry: declare @llvm.riscv.vmsge.nxv4i64.i64( , i64, - i32); - -define @intrinsic_vmsge_vx_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmsle.vv v0, v12, v8 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsge_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsge_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmsle.vv v0, v12, v8 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsge_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmslt.vx v12, v8, a0 +; RV64-NEXT: vmnot.m v0, v12 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsge.nxv4i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1806,35 +1847,44 @@ declare @llvm.riscv.vmsge.mask.nxv4i64.i64( , i64, , - i32); - -define @intrinsic_vmsge_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsle.vv v13, v16, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsge_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmv1r.v v13, v0 +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: vmsle.vv v13, v16, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v13 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v13, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: vmslt.vx v13, v8, a0, v0.t +; RV64-NEXT: vmxor.mm v0, v13, v12 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsge.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } -define @intrinsic_vmsge_vi_nxv1i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsge_vi_nxv1i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, 
ta, ma @@ -1844,12 +1894,12 @@ entry: %a = call @llvm.riscv.vmsge.nxv1i8.i8( %0, i8 -15, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsge_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsge_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1864,12 +1914,12 @@ entry: %1, i8 -14, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_vi_nxv2i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsge_vi_nxv2i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -1879,12 +1929,12 @@ entry: %a = call @llvm.riscv.vmsge.nxv2i8.i8( %0, i8 -13, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsge_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsge_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1899,12 +1949,12 @@ entry: %1, i8 -12, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_vi_nxv4i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsge_vi_nxv4i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -1914,12 +1964,12 @@ entry: %a = call @llvm.riscv.vmsge.nxv4i8.i8( %0, i8 -11, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsge_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsge_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1934,12 +1984,12 @@ entry: %1, i8 -10, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_vi_nxv8i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsge_vi_nxv8i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -1949,12 +1999,12 @@ entry: %a = call @llvm.riscv.vmsge.nxv8i8.i8( %0, i8 -9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsge_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsge_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1969,12 +2019,12 @@ entry: %1, i8 -8, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_vi_nxv16i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsge_vi_nxv16i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -1984,12 +2034,12 @@ entry: %a = call @llvm.riscv.vmsge.nxv16i8.i8( %0, i8 -7, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsge_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsge_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2004,12 +2054,12 @@ entry: %1, i8 -6, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_vi_nxv32i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsge_vi_nxv32i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -2019,12 +2069,12 @@ entry: %a = call 
@llvm.riscv.vmsge.nxv32i8.i8( %0, i8 -5, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsge_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsge_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2039,12 +2089,12 @@ entry: %1, i8 -4, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_vi_nxv1i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsge_vi_nxv1i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -2054,12 +2104,12 @@ entry: %a = call @llvm.riscv.vmsge.nxv1i16.i16( %0, i16 -3, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsge_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsge_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2074,12 +2124,12 @@ entry: %1, i16 -2, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_vi_nxv2i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsge_vi_nxv2i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -2089,12 +2139,12 @@ entry: %a = call @llvm.riscv.vmsge.nxv2i16.i16( %0, i16 -1, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsge_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsge_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2109,12 +2159,12 @@ entry: %1, i16 0, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_vi_nxv4i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsge_vi_nxv4i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -2124,12 +2174,12 @@ entry: %a = call @llvm.riscv.vmsge.nxv4i16.i16( %0, i16 0, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsge_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsge_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2144,12 +2194,12 @@ entry: %1, i16 1, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_vi_nxv8i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsge_vi_nxv8i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -2159,12 +2209,12 @@ entry: %a = call @llvm.riscv.vmsge.nxv8i16.i16( %0, i16 2, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsge_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsge_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2179,12 +2229,12 @@ entry: %1, i16 3, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_vi_nxv16i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsge_vi_nxv16i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -2194,12 +2244,12 @@ entry: %a = call 
@llvm.riscv.vmsge.nxv16i16.i16( %0, i16 4, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsge_mask_vi_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsge_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2214,12 +2264,12 @@ entry: %1, i16 5, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_vi_nxv1i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsge_vi_nxv1i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -2229,12 +2279,12 @@ entry: %a = call @llvm.riscv.vmsge.nxv1i32.i32( %0, i32 6, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsge_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsge_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2249,12 +2299,12 @@ entry: %1, i32 7, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_vi_nxv2i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsge_vi_nxv2i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -2264,12 +2314,12 @@ entry: %a = call @llvm.riscv.vmsge.nxv2i32.i32( %0, i32 8, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsge_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsge_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2284,12 +2334,12 @@ entry: %1, i32 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_vi_nxv4i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsge_vi_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -2299,12 +2349,12 @@ entry: %a = call @llvm.riscv.vmsge.nxv4i32.i32( %0, i32 10, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsge_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsge_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2319,12 +2369,12 @@ entry: %1, i32 11, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_vi_nxv8i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsge_vi_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -2334,12 +2384,12 @@ entry: %a = call @llvm.riscv.vmsge.nxv8i32.i32( %0, i32 12, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsge_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsge_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2354,12 +2404,12 @@ entry: %1, i32 13, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_vi_nxv1i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmsge_vi_nxv1i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -2369,12 +2419,12 @@ entry: %a = call 
@llvm.riscv.vmsge.nxv1i64.i64( %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsge_mask_vi_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsge_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2389,12 +2439,12 @@ entry: %1, i64 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_vi_nxv2i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmsge_vi_nxv2i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -2404,12 +2454,12 @@ entry: %a = call @llvm.riscv.vmsge.nxv2i64.i64( %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsge_mask_vi_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsge_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2424,12 +2474,12 @@ entry: %1, i64 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_vi_nxv4i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmsge_vi_nxv4i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -2439,12 +2489,12 @@ entry: %a = call @llvm.riscv.vmsge.nxv4i64.i64( %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsge_mask_vi_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsge_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2459,13 +2509,13 @@ entry: %1, i64 9, %2, - i32 %3) + iXLen %3) ret %a } ; Test cases where the mask and maskedoff are the same value. 
-define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8( %0, %1, i8 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -2478,12 +2528,12 @@ entry: %1, i8 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8( %0, %1, i8 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -2496,12 +2546,12 @@ entry: %1, i8 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8( %0, %1, i8 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -2514,12 +2564,12 @@ entry: %1, i8 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8( %0, %1, i8 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -2532,12 +2582,12 @@ entry: %1, i8 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +define @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8( %0, %1, i8 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -2550,12 +2600,12 @@ entry: %1, i8 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +define @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8( %0, %1, i8 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -2568,12 +2618,12 @@ entry: %1, i8 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16( %0, %1, i16 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -2586,12 +2636,12 @@ entry: %1, i16 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16( %0, %1, i16 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -2604,12 +2654,12 @@ entry: %1, i16 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16( %0, %1, i16 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -2622,12 +2672,12 @@ entry: %1, i16 %2, %0, - i32 %3) + iXLen %3) ret %a } -define 
@intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16( %0, %1, i16 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -2640,12 +2690,12 @@ entry: %1, i16 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +define @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16( %0, %1, i16 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -2658,12 +2708,12 @@ entry: %1, i16 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -2676,12 +2726,12 @@ entry: %1, i32 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -2694,12 +2744,12 @@ entry: %1, i32 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -2712,12 +2762,12 @@ entry: %1, i32 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -2730,80 +2780,101 @@ entry: %1, i32 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64( %0, %1, i64 %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret +define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vmsle.vv v0, v9, v8, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmslt.vx v8, v8, a0 +; RV64-NEXT: vmandn.mm v0, v0, v8 +; 
RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsge.mask.nxv1i64.i64( %0, %1, i64 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64( %0, %1, i64 %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vmsle.vv v10, v12, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret +define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmv1r.v v10, v0 +; RV32-NEXT: vmsle.vv v10, v12, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmslt.vx v10, v8, a0 +; RV64-NEXT: vmandn.mm v0, v0, v10 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsge.mask.nxv2i64.i64( %0, %1, i64 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64( %0, %1, i64 %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: vmsle.vv v12, v16, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret +define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmv1r.v v12, v0 +; RV32-NEXT: vmsle.vv v12, v16, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmslt.vx v12, v8, a0 +; RV64-NEXT: vmandn.mm v0, v0, v12 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsge.mask.nxv4i64.i64( %0, %1, i64 %2, %0, - i32 %3) + iXLen %3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll deleted file mode 100644 index 36af3a9..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll +++ /dev/null @@ -1,2769 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmsgeu.nxv1i8( - , - , - i64); - -define @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv1i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv1i8( - , - , - , - , - i64); - -define @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmsleu.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgeu.nxv1i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgeu.mask.nxv1i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv2i8( - , - , - i64); - -define @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv2i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv2i8( - , - , - , - , - i64); - -define @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmsleu.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgeu.nxv2i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgeu.mask.nxv2i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv4i8( - , - , - i64); - -define @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv4i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv4i8( - , - , - , - , - i64); - -define @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmsleu.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgeu.nxv4i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgeu.mask.nxv4i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv8i8( - , - , - i64); - -define @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv8i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv8i8( - , - , - , - , - i64); - -define @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmsleu.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgeu.nxv8i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgeu.mask.nxv8i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv16i8( - , - , - i64); - -define @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v10, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv16i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv16i8( - , - , - , - , - i64); - -define @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmsleu.vv v14, v10, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgeu.nxv16i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgeu.mask.nxv16i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv32i8( - , - , - i64); - -define @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v12, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv32i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv32i8( - , - , - , - , - i64); - -define @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmsleu.vv v20, v12, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgeu.nxv32i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgeu.mask.nxv32i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv1i16( - , - , - i64); - -define @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv1i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv1i16( - , - , - , - , - i64); - -define @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmsleu.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgeu.nxv1i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgeu.mask.nxv1i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare 
@llvm.riscv.vmsgeu.nxv2i16( - , - , - i64); - -define @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv2i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv2i16( - , - , - , - , - i64); - -define @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmsleu.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgeu.nxv2i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgeu.mask.nxv2i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv4i16( - , - , - i64); - -define @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv4i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv4i16( - , - , - , - , - i64); - -define @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmsleu.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgeu.nxv4i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgeu.mask.nxv4i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv8i16( - , - , - i64); - -define @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v10, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv8i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv8i16( - , - , - , - , - i64); - -define @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmsleu.vv v14, v10, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgeu.nxv8i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgeu.mask.nxv8i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv16i16( - , - , - i64); - -define @intrinsic_vmsgeu_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v12, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv16i16( - %0, - %1, - i64 %2) - - ret %a -} - 
-declare @llvm.riscv.vmsgeu.mask.nxv16i16( - , - , - , - , - i64); - -define @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmsleu.vv v20, v12, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgeu.nxv16i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgeu.mask.nxv16i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv1i32( - , - , - i64); - -define @intrinsic_vmsgeu_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv1i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv1i32( - , - , - , - , - i64); - -define @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmsleu.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgeu.nxv1i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgeu.mask.nxv1i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv2i32( - , - , - i64); - -define @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv2i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv2i32( - , - , - , - , - i64); - -define @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmsleu.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgeu.nxv2i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgeu.mask.nxv2i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv4i32( - , - , - i64); - -define @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v10, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv4i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv4i32( - , - , - , - , - i64); - -define @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmsleu.vv v14, v10, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; 
CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgeu.nxv4i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgeu.mask.nxv4i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv8i32( - , - , - i64); - -define @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v12, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv8i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv8i32( - , - , - , - , - i64); - -define @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmsleu.vv v20, v12, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgeu.nxv8i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgeu.mask.nxv8i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv1i64( - , - , - i64); - -define @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv1i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv1i64( - , - , - , - , - i64); - -define @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmsleu.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgeu.nxv1i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgeu.mask.nxv1i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv2i64( - , - , - i64); - -define @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v10, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv2i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv2i64( - , - , - , - , - i64); - -define @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmsleu.vv v14, v10, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgeu.nxv2i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgeu.mask.nxv2i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv4i64( - , - , - i64); - -define @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { -; 
CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v12, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv4i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv4i64( - , - , - , - , - i64); - -define @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmsleu.vv v20, v12, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgeu.nxv4i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgeu.mask.nxv4i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv1i8.i8( - , - i8, - i64); - -define @intrinsic_vmsgeu_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vmsltu.vx v8, v8, a0 -; CHECK-NEXT: vmnot.m v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv1i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv1i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsgeu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv2i8.i8( - , - i8, - i64); - -define @intrinsic_vmsgeu_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vmsltu.vx v8, v8, a0 -; CHECK-NEXT: vmnot.m v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv2i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv2i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsgeu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv4i8.i8( - , - i8, - i64); - -define @intrinsic_vmsgeu_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vmsltu.vx v8, v8, a0 -; CHECK-NEXT: vmnot.m v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv4i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv4i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsgeu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv8i8.i8( - , - i8, - i64); - -define @intrinsic_vmsgeu_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vmsltu.vx v8, v8, a0 -; CHECK-NEXT: vmnot.m v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv8i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv8i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsgeu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv16i8.i8( - , - i8, - i64); - -define @intrinsic_vmsgeu_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vmsltu.vx v10, v8, a0 -; CHECK-NEXT: vmnot.m v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv16i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv16i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsgeu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v11, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv32i8.i8( - , - i8, - i64); - -define @intrinsic_vmsgeu_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vmsltu.vx v12, v8, a0 -; CHECK-NEXT: vmnot.m v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv32i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv32i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsgeu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v13, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv1i16.i16( - , - i16, - i64); - -define @intrinsic_vmsgeu_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: 
vmsltu.vx v8, v8, a0 -; CHECK-NEXT: vmnot.m v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv1i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv1i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsgeu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv2i16.i16( - , - i16, - i64); - -define @intrinsic_vmsgeu_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vmsltu.vx v8, v8, a0 -; CHECK-NEXT: vmnot.m v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv2i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv2i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsgeu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv4i16.i16( - , - i16, - i64); - -define @intrinsic_vmsgeu_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vmsltu.vx v8, v8, a0 -; CHECK-NEXT: vmnot.m v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv4i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv4i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsgeu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv8i16.i16( - , - i16, - i64); - -define @intrinsic_vmsgeu_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vmsltu.vx v10, v8, a0 -; CHECK-NEXT: vmnot.m v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv8i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv8i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsgeu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsltu.vx 
v11, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v11, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv16i16.i16( - , - i16, - i64); - -define @intrinsic_vmsgeu_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vmsltu.vx v12, v8, a0 -; CHECK-NEXT: vmnot.m v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv16i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv16i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsgeu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v13, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv1i32.i32( - , - i32, - i64); - -define @intrinsic_vmsgeu_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vmsltu.vx v8, v8, a0 -; CHECK-NEXT: vmnot.m v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv1i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv1i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsgeu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv2i32.i32( - , - i32, - i64); - -define @intrinsic_vmsgeu_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vmsltu.vx v8, v8, a0 -; CHECK-NEXT: vmnot.m v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv2i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv2i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsgeu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv4i32.i32( - , - i32, - i64); - -define @intrinsic_vmsgeu_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vmsltu.vx v10, v8, a0 -; CHECK-NEXT: vmnot.m v0, v10 -; CHECK-NEXT: ret 
-entry: - %a = call @llvm.riscv.vmsgeu.nxv4i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv4i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsgeu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v11, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv8i32.i32( - , - i32, - i64); - -define @intrinsic_vmsgeu_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vmsltu.vx v12, v8, a0 -; CHECK-NEXT: vmnot.m v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv8i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv8i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsgeu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v13, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv1i64.i64( - , - i64, - i64); - -define @intrinsic_vmsgeu_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vmsltu.vx v8, v8, a0 -; CHECK-NEXT: vmnot.m v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv1i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv1i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmsgeu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v10, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv2i64.i64( - , - i64, - i64); - -define @intrinsic_vmsgeu_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vmsltu.vx v10, v8, a0 -; CHECK-NEXT: vmnot.m v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv2i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv2i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmsgeu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v11, v10 -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgeu.nxv4i64.i64( - , - i64, - i64); - -define @intrinsic_vmsgeu_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vmsltu.vx v12, v8, a0 -; CHECK-NEXT: vmnot.m v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv4i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgeu.mask.nxv4i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmsgeu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmxor.mm v0, v13, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -define @intrinsic_vmsgeu_vi_nxv1i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, -16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv1i8.i8( - %0, - i8 -15, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgeu_mask_vi_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vi v10, v8, -15, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv1i8.i8( - %0, - %1, - i8 -14, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_vi_nxv2i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, -14 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv2i8.i8( - %0, - i8 -13, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgeu_mask_vi_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vi v10, v8, -13, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv2i8.i8( - %0, - %1, - i8 -12, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_vi_nxv4i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, -12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv4i8.i8( - %0, - i8 -11, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgeu_mask_vi_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vi v10, v8, -11, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv4i8.i8( - %0, - %1, - i8 -10, - %2, - i64 %3) - - ret %a -} - -define 
@intrinsic_vmsgeu_vi_nxv8i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, -10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv8i8.i8( - %0, - i8 -9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgeu_mask_vi_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vi v10, v8, -9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8( - %0, - %1, - i8 -8, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_vi_nxv16i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, -8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv16i8.i8( - %0, - i8 -7, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgeu_mask_vi_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgtu.vi v11, v8, -7, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv16i8.i8( - %0, - %1, - i8 -6, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_vi_nxv32i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, -6 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv32i8.i8( - %0, - i8 -5, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgeu_mask_vi_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgtu.vi v13, v8, -5, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv32i8.i8( - %0, - %1, - i8 -4, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_vi_nxv1i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, -4 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv1i16.i16( - %0, - i16 -3, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgeu_mask_vi_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vi v10, v8, -3, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv1i16.i16( - %0, - %1, - i16 -2, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_vi_nxv2i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, -2 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv2i16.i16( - %0, - i16 -1, - i64 %1) - - ret %a 
-} - -define @intrinsic_vmsgeu_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmor.mm v0, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16( - %0, - %1, - i16 0, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_vi_nxv4i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmset.m v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv4i16.i16( - %0, - i16 0, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgeu_mask_vi_nxv2i16_i16_same_mask_maskedoff( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16_same_mask_maskedoff: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16( - %0, - %1, - i16 0, - %0, - i64 %2) - - ret %a -} - -define @intrinsic_vmsgeu_mask_vi_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vi v10, v8, 0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16( - %0, - %1, - i16 1, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_vi_nxv8i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 1 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv8i16.i16( - %0, - i16 2, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgeu_mask_vi_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgtu.vi v11, v8, 2, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv8i16.i16( - %0, - %1, - i16 3, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_vi_nxv16i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 3 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv16i16.i16( - %0, - i16 4, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgeu_mask_vi_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgtu.vi v13, v8, 4, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv16i16.i16( - %0, - %1, - i16 5, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_vi_nxv1i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 5 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv1i32.i32( - %0, - i32 6, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgeu_mask_vi_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { -; 
CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vi v10, v8, 6, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32( - %0, - %1, - i32 7, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_vi_nxv2i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 7 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv2i32.i32( - %0, - i32 8, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgeu_mask_vi_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vi v10, v8, 8, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_vi_nxv4i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv4i32.i32( - %0, - i32 10, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgeu_mask_vi_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgtu.vi v11, v8, 10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv4i32.i32( - %0, - %1, - i32 11, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_vi_nxv8i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv8i32.i32( - %0, - i32 12, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgeu_mask_vi_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgtu.vi v13, v8, 12, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv8i32.i32( - %0, - %1, - i32 13, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_vi_nxv1i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv1i64.i64( - %0, - i64 14, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgeu_mask_vi_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vi v10, v8, 14, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vmsgeu.mask.nxv1i64.i64( - %0, - %1, - i64 15, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_vi_nxv2i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 15 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv2i64.i64( - %0, - i64 16, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgeu_mask_vi_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgtu.vi v11, v8, -16, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv2i64.i64( - %0, - %1, - i64 -15, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_vi_nxv4i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, -15 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.nxv4i64.i64( - %0, - i64 -14, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgeu_mask_vi_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgtu.vi v13, v8, -14, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv4i64.i64( - %0, - %1, - i64 -13, - %2, - i64 %3) - - ret %a -} - -; Test cases where the mask and maskedoff are the same value. 
-define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8( %0, %1, i8 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vmsltu.vx v8, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8( %0, %1, i8 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vmsltu.vx v8, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8( %0, %1, i8 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vmsltu.vx v8, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8( %0, %1, i8 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vmsltu.vx v8, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8( %0, %1, i8 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vmsltu.vx v10, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8( %0, %1, i8 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vmsltu.vx v12, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16( %0, %1, i16 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vmsltu.vx v8, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16( %0, %1, i16 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vmsltu.vx v8, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16( %0, %1, i16 %2, i64 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vmsltu.vx v8, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16( %0, %1, i16 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vmsltu.vx v10, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16( %0, %1, i16 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vmsltu.vx v12, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32( %0, %1, i32 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vmsltu.vx v8, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32( %0, %1, i32 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vmsltu.vx v8, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32( %0, %1, i32 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vmsltu.vx v10, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32( %0, %1, i32 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vmsltu.vx v12, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64( %0, %1, i64 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vmsltu.vx v8, v8, a0 -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %0, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64( %0, %1, i64 %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64: -; CHECK: # 
%bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %0,
-    i64 %3)
-
-  ret %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vx v12, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %0,
-    i64 %3)
-
-  ret %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
similarity index 83%
rename from llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
index 7b5cd71..cfb8bd5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret %a
 }
@@ -26,9 +28,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -42,13 +44,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret %a
 }
@@ -56,9 +58,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -68,7 +70,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret %a
 }
@@ -78,9 +80,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -94,13 +96,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret %a
 }
@@ -108,9 +110,9 @@
entry: declare @llvm.riscv.vmsgeu.nxv4i8( , , - i32); + iXLen); -define @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -120,7 +122,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv4i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -130,9 +132,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv4i8( , , , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -146,13 +148,13 @@ entry: %mask = call @llvm.riscv.vmsgeu.nxv4i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgeu.mask.nxv4i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -160,9 +162,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv8i8( , , - i32); + iXLen); -define @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -172,7 +174,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv8i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -182,9 +184,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv8i8( , , , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -198,13 +200,13 @@ entry: %mask = call @llvm.riscv.vmsgeu.nxv8i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgeu.mask.nxv8i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -212,9 +214,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv16i8( , , - i32); + iXLen); -define @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -224,7 +226,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv16i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -234,9 +236,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv16i8( , , , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -250,13 +252,13 @@ entry: %mask = call @llvm.riscv.vmsgeu.nxv16i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgeu.mask.nxv16i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -264,9 +266,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv32i8( , , - i32); + iXLen); -define @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -276,7 +278,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv32i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ 
-286,9 +288,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv32i8( , , , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -302,13 +304,13 @@ entry: %mask = call @llvm.riscv.vmsgeu.nxv32i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgeu.mask.nxv32i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -316,9 +318,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv1i16( , , - i32); + iXLen); -define @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -328,7 +330,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv1i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -338,9 +340,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv1i16( , , , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -354,13 +356,13 @@ entry: %mask = call @llvm.riscv.vmsgeu.nxv1i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgeu.mask.nxv1i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -368,9 +370,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv2i16( , , - i32); + iXLen); -define @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -380,7 +382,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv2i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -390,9 +392,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv2i16( , , , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -406,13 +408,13 @@ entry: %mask = call @llvm.riscv.vmsgeu.nxv2i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgeu.mask.nxv2i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -420,9 +422,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv4i16( , , - i32); + iXLen); -define @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -432,7 +434,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv4i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv4i16( , , , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -458,13 +460,13 
@@ entry: %mask = call @llvm.riscv.vmsgeu.nxv4i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgeu.mask.nxv4i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -472,9 +474,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv8i16( , , - i32); + iXLen); -define @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -484,7 +486,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv8i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -494,9 +496,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv8i16( , , , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -510,13 +512,13 @@ entry: %mask = call @llvm.riscv.vmsgeu.nxv8i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgeu.mask.nxv8i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -524,9 +526,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv16i16( , , - i32); + iXLen); -define @intrinsic_vmsgeu_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -536,7 +538,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv16i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -546,9 +548,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv16i16( , , , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -562,13 +564,13 @@ entry: %mask = call @llvm.riscv.vmsgeu.nxv16i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgeu.mask.nxv16i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -576,9 +578,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv1i32( , , - i32); + iXLen); -define @intrinsic_vmsgeu_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -588,7 +590,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv1i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -598,9 +600,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv1i32( , , , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -614,13 +616,13 @@ entry: %mask = call @llvm.riscv.vmsgeu.nxv1i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgeu.mask.nxv1i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -628,9 +630,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv2i32( , , - i32); + iXLen); -define @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32( %0, %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -640,7 +642,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv2i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -650,9 +652,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv2i32( , , , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -666,13 +668,13 @@ entry: %mask = call @llvm.riscv.vmsgeu.nxv2i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgeu.mask.nxv2i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -680,9 +682,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv4i32( , , - i32); + iXLen); -define @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -692,7 +694,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv4i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -702,9 +704,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv4i32( , , , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -718,13 +720,13 @@ entry: %mask = call @llvm.riscv.vmsgeu.nxv4i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgeu.mask.nxv4i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -732,9 +734,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv8i32( , , - i32); + iXLen); -define @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -744,7 +746,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv8i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -754,9 +756,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv8i32( , , , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -770,13 +772,13 @@ entry: %mask = call @llvm.riscv.vmsgeu.nxv8i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgeu.mask.nxv8i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -784,9 +786,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv1i64( , , - i32); + iXLen); -define @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -796,7 +798,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv1i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv1i64( , , , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) 
nounwind { +define @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -822,13 +824,13 @@ entry: %mask = call @llvm.riscv.vmsgeu.nxv1i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgeu.mask.nxv1i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -836,9 +838,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv2i64( , , - i32); + iXLen); -define @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -848,7 +850,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv2i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -858,9 +860,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv2i64( , , , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -874,13 +876,13 @@ entry: %mask = call @llvm.riscv.vmsgeu.nxv2i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgeu.mask.nxv2i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -888,9 +890,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv4i64( , , - i32); + iXLen); -define @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -900,7 +902,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv4i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -910,9 +912,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv4i64( , , , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -926,13 +928,13 @@ entry: %mask = call @llvm.riscv.vmsgeu.nxv4i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgeu.mask.nxv4i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -940,9 +942,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv1i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsgeu_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -953,7 +955,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv1i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -963,9 +965,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv1i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -980,7 +982,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -988,9 +990,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv2i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsgeu_vx_nxv2i8_i8( %0, i8 
%1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -1001,7 +1003,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv2i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1011,9 +1013,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv2i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1028,7 +1030,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1036,9 +1038,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv4i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsgeu_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1049,7 +1051,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv4i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1059,9 +1061,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv4i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1076,7 +1078,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1084,9 +1086,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv8i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsgeu_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -1097,7 +1099,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv8i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1107,9 +1109,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv8i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1124,7 +1126,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1132,9 +1134,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv16i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsgeu_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1145,7 +1147,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv16i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1155,9 +1157,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv16i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1172,7 +1174,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1180,9 +1182,9 @@ entry: declare 
@llvm.riscv.vmsgeu.nxv32i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsgeu_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -1193,7 +1195,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv32i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1203,9 +1205,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv32i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1220,7 +1222,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1228,9 +1230,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv1i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsgeu_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -1241,7 +1243,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv1i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1251,9 +1253,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv1i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1268,7 +1270,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1276,9 +1278,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv2i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsgeu_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -1289,7 +1291,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv2i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1299,9 +1301,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv2i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1316,7 +1318,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1324,9 +1326,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv4i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsgeu_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1337,7 +1339,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv4i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1347,9 +1349,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv4i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16: ; CHECK: 
# %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1364,7 +1366,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1372,9 +1374,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv8i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsgeu_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1385,7 +1387,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv8i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1395,9 +1397,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv8i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1412,7 +1414,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1420,9 +1422,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv16i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsgeu_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -1433,7 +1435,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv16i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1443,9 +1445,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv16i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1460,7 +1462,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1468,9 +1470,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv1i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsgeu_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1481,7 +1483,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv1i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1491,9 +1493,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv1i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1508,7 +1510,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1516,9 +1518,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv2i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsgeu_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1529,7 +1531,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv2i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1539,9 +1541,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv2i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vx_nxv2i32_i32( 
%0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1556,7 +1558,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1564,9 +1566,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv4i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsgeu_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1577,7 +1579,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv4i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1587,9 +1589,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv4i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1604,7 +1606,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1612,9 +1614,9 @@ entry: declare @llvm.riscv.vmsgeu.nxv8i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsgeu_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1625,7 +1627,7 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv8i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1635,9 +1637,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv8i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsgeu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgeu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1652,7 +1654,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1660,25 +1662,32 @@ entry: declare @llvm.riscv.vmsgeu.nxv1i64.i64( , i64, - i32); - -define @intrinsic_vmsgeu_vx_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vmsleu.vv v0, v9, v8 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsgeu_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vmsleu.vv v0, v9, v8 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsltu.vx v8, v8, a0 +; RV64-NEXT: vmnot.m v0, v8 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgeu.nxv1i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1688,30 +1697,39 @@ declare @llvm.riscv.vmsgeu.mask.nxv1i64.i64( , i64, , - i32); - -define 
@intrinsic_vmsgeu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v11, (a0), zero -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vv v10, v11, v8, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsgeu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v11, (a0), zero +; RV32-NEXT: vmv1r.v v10, v0 +; RV32-NEXT: vmv1r.v v0, v9 +; RV32-NEXT: vmsleu.vv v10, v11, v8, v0.t +; RV32-NEXT: vmv.v.v v0, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v10, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vmv1r.v v0, v9 +; RV64-NEXT: vmsltu.vx v10, v8, a0, v0.t +; RV64-NEXT: vmxor.mm v0, v10, v9 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1719,25 +1737,32 @@ entry: declare @llvm.riscv.vmsgeu.nxv2i64.i64( , i64, - i32); - -define @intrinsic_vmsgeu_vx_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vmsleu.vv v0, v10, v8 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsgeu_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vmsleu.vv v0, v10, v8 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmsltu.vx v10, v8, a0 +; RV64-NEXT: vmnot.m v0, v10 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgeu.nxv2i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1747,30 +1772,39 @@ declare @llvm.riscv.vmsgeu.mask.nxv2i64.i64( , i64, , - i32); - -define @intrinsic_vmsgeu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsleu.vv v11, v12, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsgeu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: 
intrinsic_vmsgeu_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmv1r.v v11, v0 +; RV32-NEXT: vmv1r.v v0, v10 +; RV32-NEXT: vmsleu.vv v11, v12, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v11 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v11, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vmv1r.v v0, v10 +; RV64-NEXT: vmsltu.vx v11, v8, a0, v0.t +; RV64-NEXT: vmxor.mm v0, v11, v10 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgeu.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1778,25 +1812,32 @@ entry: declare @llvm.riscv.vmsgeu.nxv4i64.i64( , i64, - i32); - -define @intrinsic_vmsgeu_vx_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmsleu.vv v0, v12, v8 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsgeu_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmsleu.vv v0, v12, v8 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsltu.vx v12, v8, a0 +; RV64-NEXT: vmnot.m v0, v12 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgeu.nxv4i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1806,35 +1847,44 @@ declare @llvm.riscv.vmsgeu.mask.nxv4i64.i64( , i64, , - i32); - -define @intrinsic_vmsgeu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsleu.vv v13, v16, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsgeu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmv1r.v v13, v0 +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: vmsleu.vv v13, v16, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v13 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v13, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: vmsltu.vx 
v13, v8, a0, v0.t +; RV64-NEXT: vmxor.mm v0, v13, v12 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgeu.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } -define @intrinsic_vmsgeu_vi_nxv1i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsgeu_vi_nxv1i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -1844,12 +1894,12 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv1i8.i8( %0, i8 -15, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgeu_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1864,12 +1914,12 @@ entry: %1, i8 -14, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_vi_nxv2i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsgeu_vi_nxv2i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -1879,12 +1929,12 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv2i8.i8( %0, i8 -13, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgeu_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1899,12 +1949,12 @@ entry: %1, i8 -12, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_vi_nxv4i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsgeu_vi_nxv4i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -1914,12 +1964,12 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv4i8.i8( %0, i8 -11, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgeu_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1934,12 +1984,12 @@ entry: %1, i8 -10, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_vi_nxv8i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsgeu_vi_nxv8i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -1949,12 +1999,12 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv8i8.i8( %0, i8 -9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgeu_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1969,12 +2019,12 @@ entry: %1, i8 -8, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_vi_nxv16i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsgeu_vi_nxv16i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -1984,12 +2034,12 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv16i8.i8( %0, i8 -7, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgeu_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmsgeu_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2004,12 +2054,12 @@ entry: %1, i8 -6, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_vi_nxv32i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsgeu_vi_nxv32i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -2019,12 +2069,12 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv32i8.i8( %0, i8 -5, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgeu_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2039,12 +2089,12 @@ entry: %1, i8 -4, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_vi_nxv1i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsgeu_vi_nxv1i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -2054,12 +2104,12 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv1i16.i16( %0, i16 -3, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgeu_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2074,12 +2124,12 @@ entry: %1, i16 -2, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_vi_nxv2i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsgeu_vi_nxv2i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -2089,12 +2139,12 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv2i16.i16( %0, i16 -1, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgeu_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -2106,12 +2156,12 @@ entry: %1, i16 0, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_mask_vi_nxv2i16_i16_same_mask_maskedoff( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgeu_mask_vi_nxv2i16_i16_same_mask_maskedoff( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16_same_mask_maskedoff: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: ret @@ -2121,12 +2171,12 @@ entry: %1, i16 0, %0, - i32 %2) + iXLen %2) ret %a } -define @intrinsic_vmsgeu_vi_nxv4i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsgeu_vi_nxv4i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -2136,12 +2186,12 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv4i16.i16( %0, i16 0, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgeu_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2156,12 +2206,12 @@ entry: %1, i16 1, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_vi_nxv8i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsgeu_vi_nxv8i16_i16( %0, iXLen %1) 
nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -2171,12 +2221,12 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv8i16.i16( %0, i16 2, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgeu_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2191,12 +2241,12 @@ entry: %1, i16 3, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_vi_nxv16i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsgeu_vi_nxv16i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -2206,12 +2256,12 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv16i16.i16( %0, i16 4, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgeu_mask_vi_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2226,12 +2276,12 @@ entry: %1, i16 5, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_vi_nxv1i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsgeu_vi_nxv1i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -2241,12 +2291,12 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv1i32.i32( %0, i32 6, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgeu_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2261,12 +2311,12 @@ entry: %1, i32 7, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_vi_nxv2i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsgeu_vi_nxv2i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -2276,12 +2326,12 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv2i32.i32( %0, i32 8, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgeu_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2296,12 +2346,12 @@ entry: %1, i32 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_vi_nxv4i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsgeu_vi_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -2311,12 +2361,12 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv4i32.i32( %0, i32 10, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgeu_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2331,12 +2381,12 @@ entry: %1, i32 11, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_vi_nxv8i32_i32( %0, i32 %1) nounwind { +define 
@intrinsic_vmsgeu_vi_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -2346,12 +2396,12 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv8i32.i32( %0, i32 12, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgeu_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2366,12 +2416,12 @@ entry: %1, i32 13, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_vi_nxv1i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmsgeu_vi_nxv1i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -2381,12 +2431,12 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv1i64.i64( %0, i64 14, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgeu_mask_vi_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2401,12 +2451,12 @@ entry: %1, i64 15, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_vi_nxv2i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmsgeu_vi_nxv2i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -2416,12 +2466,12 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv2i64.i64( %0, i64 16, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgeu_mask_vi_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2436,12 +2486,12 @@ entry: %1, i64 -15, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_vi_nxv4i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmsgeu_vi_nxv4i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -2451,12 +2501,12 @@ entry: %a = call @llvm.riscv.vmsgeu.nxv4i64.i64( %0, i64 -14, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgeu_mask_vi_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2471,13 +2521,13 @@ entry: %1, i64 -13, %2, - i32 %3) + iXLen %3) ret %a } ; Test cases where the mask and maskedoff are the same value. 
-define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8( %0, %1, i8 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -2490,12 +2540,12 @@ entry: %1, i8 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8( %0, %1, i8 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -2508,12 +2558,12 @@ entry: %1, i8 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8( %0, %1, i8 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -2526,12 +2576,12 @@ entry: %1, i8 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8( %0, %1, i8 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -2544,12 +2594,12 @@ entry: %1, i8 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8( %0, %1, i8 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -2562,12 +2612,12 @@ entry: %1, i8 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8( %0, %1, i8 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -2580,12 +2630,12 @@ entry: %1, i8 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16( %0, %1, i16 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -2598,12 +2648,12 @@ entry: %1, i16 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16( %0, %1, i16 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -2616,12 +2666,12 @@ entry: %1, i16 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16( %0, %1, i16 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -2634,12 +2684,12 @@ entry: %1, i16 %2, %0, - i32 %3) + 
iXLen %3) ret %a } -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16( %0, %1, i16 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -2652,12 +2702,12 @@ entry: %1, i16 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16( %0, %1, i16 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -2670,12 +2720,12 @@ entry: %1, i16 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -2688,12 +2738,12 @@ entry: %1, i32 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -2706,12 +2756,12 @@ entry: %1, i32 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -2724,12 +2774,12 @@ entry: %1, i32 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -2742,80 +2792,101 @@ entry: %1, i32 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64( %0, %1, i64 %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vmsleu.vv v0, v9, v8, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vmsleu.vv v0, v9, v8, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: 
vmsltu.vx v8, v8, a0 +; RV64-NEXT: vmandn.mm v0, v0, v8 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64( %0, %1, i64 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64( %0, %1, i64 %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vmsleu.vv v10, v12, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmv1r.v v10, v0 +; RV32-NEXT: vmsleu.vv v10, v12, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmsltu.vx v10, v8, a0 +; RV64-NEXT: vmandn.mm v0, v0, v10 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgeu.mask.nxv2i64.i64( %0, %1, i64 %2, %0, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64( %0, %1, i64 %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: vmsleu.vv v12, v16, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmv1r.v v12, v0 +; RV32-NEXT: vmsleu.vv v12, v16, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsltu.vx v12, v8, a0 +; RV64-NEXT: vmandn.mm v0, v0, v12 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgeu.mask.nxv4i64.i64( %0, %1, i64 %2, %0, - i32 %3) + iXLen %3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll deleted file mode 100644 index c86f7e4..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll +++ /dev/null @@ -1,2414 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmsgt.nxv1i8( - , - , - i64); - -define @intrinsic_vmsgt_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { -; 
CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv1i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv1i8( - , - , - , - , - i64); - -define @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmslt.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgt.nxv1i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgt.mask.nxv1i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv2i8( - , - , - i64); - -define @intrinsic_vmsgt_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv2i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv2i8( - , - , - , - , - i64); - -define @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmslt.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgt.nxv2i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgt.mask.nxv2i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv4i8( - , - , - i64); - -define @intrinsic_vmsgt_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv4i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv4i8( - , - , - , - , - i64); - -define @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmslt.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgt.nxv4i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgt.mask.nxv4i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv8i8( - , - , - i64); - -define @intrinsic_vmsgt_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv8i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv8i8( - , - , - , - , - i64); - -define @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmslt.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgt.nxv8i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgt.mask.nxv8i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv16i8( - , - , - i64); - -define @intrinsic_vmsgt_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmslt.vv v0, v10, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv16i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv16i8( - , - , - , - , - i64); - -define @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmslt.vv v14, v10, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgt.nxv16i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgt.mask.nxv16i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv32i8( - , - , - i64); - -define @intrinsic_vmsgt_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vv_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmslt.vv v0, v12, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv32i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv32i8( - , - , - , - , - i64); - -define @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmslt.vv v20, v12, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgt.nxv32i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgt.mask.nxv32i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv1i16( - , - , - i64); - -define @intrinsic_vmsgt_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv1i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv1i16( - , - , - , - , - i64); - -define @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmslt.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgt.nxv1i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgt.mask.nxv1i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare 
@llvm.riscv.vmsgt.nxv2i16( - , - , - i64); - -define @intrinsic_vmsgt_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv2i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv2i16( - , - , - , - , - i64); - -define @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmslt.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgt.nxv2i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgt.mask.nxv2i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv4i16( - , - , - i64); - -define @intrinsic_vmsgt_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv4i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv4i16( - , - , - , - , - i64); - -define @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmslt.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgt.nxv4i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgt.mask.nxv4i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv8i16( - , - , - i64); - -define @intrinsic_vmsgt_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmslt.vv v0, v10, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv8i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv8i16( - , - , - , - , - i64); - -define @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmslt.vv v14, v10, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgt.nxv8i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgt.mask.nxv8i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv16i16( - , - , - i64); - -define @intrinsic_vmsgt_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmslt.vv v0, v12, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv16i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv16i16( - 
, - , - , - , - i64); - -define @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmslt.vv v20, v12, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgt.nxv16i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgt.mask.nxv16i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv1i32( - , - , - i64); - -define @intrinsic_vmsgt_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv1i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv1i32( - , - , - , - , - i64); - -define @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmslt.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgt.nxv1i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgt.mask.nxv1i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv2i32( - , - , - i64); - -define @intrinsic_vmsgt_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv2i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv2i32( - , - , - , - , - i64); - -define @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmslt.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgt.nxv2i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgt.mask.nxv2i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv4i32( - , - , - i64); - -define @intrinsic_vmsgt_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmslt.vv v0, v10, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv4i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv4i32( - , - , - , - , - i64); - -define @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmslt.vv v14, v10, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: 
ret -entry: - %mask = call @llvm.riscv.vmsgt.nxv4i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgt.mask.nxv4i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv8i32( - , - , - i64); - -define @intrinsic_vmsgt_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmslt.vv v0, v12, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv8i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv8i32( - , - , - , - , - i64); - -define @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmslt.vv v20, v12, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgt.nxv8i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgt.mask.nxv8i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv1i64( - , - , - i64); - -define @intrinsic_vmsgt_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv1i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv1i64( - , - , - , - , - i64); - -define @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmslt.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgt.nxv1i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgt.mask.nxv1i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv2i64( - , - , - i64); - -define @intrinsic_vmsgt_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmslt.vv v0, v10, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv2i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv2i64( - , - , - , - , - i64); - -define @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmslt.vv v14, v10, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgt.nxv2i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgt.mask.nxv2i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv4i64( - , - , - i64); - -define @intrinsic_vmsgt_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; 
CHECK-NEXT: vmslt.vv v0, v12, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv4i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv4i64( - , - , - , - , - i64); - -define @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmslt.vv v20, v12, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgt.nxv4i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgt.mask.nxv4i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv1i8.i8( - , - i8, - i64); - -define @intrinsic_vmsgt_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vmsgt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv1i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv1i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsgt_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv2i8.i8( - , - i8, - i64); - -define @intrinsic_vmsgt_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vmsgt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv2i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv2i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsgt_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv4i8.i8( - , - i8, - i64); - -define @intrinsic_vmsgt_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vmsgt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv4i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv4i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsgt_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv4i8.i8( - %0, - %1, - i8 %2, 
- %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv8i8.i8( - , - i8, - i64); - -define @intrinsic_vmsgt_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vmsgt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv8i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv8i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsgt_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv16i8.i8( - , - i8, - i64); - -define @intrinsic_vmsgt_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vmsgt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv16i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv16i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsgt_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgt.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv32i8.i8( - , - i8, - i64); - -define @intrinsic_vmsgt_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vmsgt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv32i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv32i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsgt_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgt.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv1i16.i16( - , - i16, - i64); - -define @intrinsic_vmsgt_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vmsgt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv1i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv1i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsgt_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, 
a1, e16, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv2i16.i16( - , - i16, - i64); - -define @intrinsic_vmsgt_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vmsgt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv2i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv2i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsgt_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv4i16.i16( - , - i16, - i64); - -define @intrinsic_vmsgt_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vmsgt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv4i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv4i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsgt_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv8i16.i16( - , - i16, - i64); - -define @intrinsic_vmsgt_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vmsgt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv8i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv8i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsgt_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgt.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv16i16.i16( - , - i16, - i64); - -define @intrinsic_vmsgt_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vmsgt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv16i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare 
@llvm.riscv.vmsgt.mask.nxv16i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsgt_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgt.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv1i32.i32( - , - i32, - i64); - -define @intrinsic_vmsgt_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vmsgt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv1i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv1i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsgt_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv2i32.i32( - , - i32, - i64); - -define @intrinsic_vmsgt_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vmsgt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv2i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv2i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsgt_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv4i32.i32( - , - i32, - i64); - -define @intrinsic_vmsgt_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vmsgt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv4i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv4i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsgt_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgt.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv8i32.i32( - , - i32, - i64); - -define @intrinsic_vmsgt_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { 
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vmsgt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv8i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv8i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsgt_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgt.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv1i64.i64( - , - i64, - i64); - -define @intrinsic_vmsgt_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vmsgt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv1i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv1i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmsgt_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv2i64.i64( - , - i64, - i64); - -define @intrinsic_vmsgt_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vmsgt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv2i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv2i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmsgt_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgt.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgt.nxv4i64.i64( - , - i64, - i64); - -define @intrinsic_vmsgt_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vmsgt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv4i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgt.mask.nxv4i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmsgt_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgt.vx v13, v8, a0, v0.t -; CHECK-NEXT: 
vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -define @intrinsic_vmsgt_vi_nxv1i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv1i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgt_mask_vi_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv1i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgt_vi_nxv2i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv2i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgt_mask_vi_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv2i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgt_vi_nxv4i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv4i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgt_mask_vi_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv4i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgt_vi_nxv8i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv8i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgt_mask_vi_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv8i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgt_vi_nxv16i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vi_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vmsgt.nxv16i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgt_mask_vi_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgt.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv16i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgt_vi_nxv32i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vi_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv32i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgt_mask_vi_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgt.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv32i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgt_vi_nxv1i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv1i16.i16( - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgt_mask_vi_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv1i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgt_vi_nxv2i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv2i16.i16( - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgt_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv2i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgt_vi_nxv4i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv4i16.i16( - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgt_mask_vi_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: 
vmsgt.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv4i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgt_vi_nxv8i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv8i16.i16( - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgt_mask_vi_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgt.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv8i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgt_vi_nxv16i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vi_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv16i16.i16( - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgt_mask_vi_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgt.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv16i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgt_vi_nxv1i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv1i32.i32( - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgt_mask_vi_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv1i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgt_vi_nxv2i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv2i32.i32( - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgt_mask_vi_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv2i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgt_vi_nxv4i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv4i32.i32( - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgt_mask_vi_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgt.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv4i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgt_vi_nxv8i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv8i32.i32( - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgt_mask_vi_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgt.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv8i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgt_vi_nxv1i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv1i64.i64( - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgt_mask_vi_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv1i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgt_vi_nxv2i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv2i64.i64( - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgt_mask_vi_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgt.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv2i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgt_vi_nxv4i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmsgt.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.nxv4i64.i64( - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgt_mask_vi_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i64_i64: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgt.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgt.mask.nxv4i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll similarity index 84% rename from llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmsgt.ll index fc820e0..bde1030 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vmsgt.nxv1i8( , , - i32); + iXLen); -define @intrinsic_vmsgt_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv1i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -26,9 +28,9 @@ declare @llvm.riscv.vmsgt.mask.nxv1i8( , , , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -42,13 +44,13 @@ entry: %mask = call @llvm.riscv.vmsgt.nxv1i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgt.mask.nxv1i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -56,9 +58,9 @@ entry: declare @llvm.riscv.vmsgt.nxv2i8( , , - i32); + iXLen); -define @intrinsic_vmsgt_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -68,7 +70,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv2i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -78,9 +80,9 @@ declare @llvm.riscv.vmsgt.mask.nxv2i8( , , , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -94,13 +96,13 @@ entry: %mask = call @llvm.riscv.vmsgt.nxv2i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgt.mask.nxv2i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -108,9 +110,9 @@ entry: declare @llvm.riscv.vmsgt.nxv4i8( , , - i32); + iXLen); -define @intrinsic_vmsgt_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -120,7 +122,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv4i8( %0, %1, - i32 %2) + iXLen 
%2) ret %a } @@ -130,9 +132,9 @@ declare @llvm.riscv.vmsgt.mask.nxv4i8( , , , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -146,13 +148,13 @@ entry: %mask = call @llvm.riscv.vmsgt.nxv4i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgt.mask.nxv4i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -160,9 +162,9 @@ entry: declare @llvm.riscv.vmsgt.nxv8i8( , , - i32); + iXLen); -define @intrinsic_vmsgt_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -172,7 +174,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv8i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -182,9 +184,9 @@ declare @llvm.riscv.vmsgt.mask.nxv8i8( , , , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -198,13 +200,13 @@ entry: %mask = call @llvm.riscv.vmsgt.nxv8i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgt.mask.nxv8i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -212,9 +214,9 @@ entry: declare @llvm.riscv.vmsgt.nxv16i8( , , - i32); + iXLen); -define @intrinsic_vmsgt_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -224,7 +226,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv16i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -234,9 +236,9 @@ declare @llvm.riscv.vmsgt.mask.nxv16i8( , , , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -250,13 +252,13 @@ entry: %mask = call @llvm.riscv.vmsgt.nxv16i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgt.mask.nxv16i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -264,9 +266,9 @@ entry: declare @llvm.riscv.vmsgt.nxv32i8( , , - i32); + iXLen); -define @intrinsic_vmsgt_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -276,7 +278,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv32i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -286,9 +288,9 @@ declare @llvm.riscv.vmsgt.mask.nxv32i8( , , , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -302,13 +304,13 @@ entry: %mask = call @llvm.riscv.vmsgt.nxv32i8( %1, 
%2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgt.mask.nxv32i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -316,9 +318,9 @@ entry: declare @llvm.riscv.vmsgt.nxv1i16( , , - i32); + iXLen); -define @intrinsic_vmsgt_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -328,7 +330,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv1i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -338,9 +340,9 @@ declare @llvm.riscv.vmsgt.mask.nxv1i16( , , , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -354,13 +356,13 @@ entry: %mask = call @llvm.riscv.vmsgt.nxv1i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgt.mask.nxv1i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -368,9 +370,9 @@ entry: declare @llvm.riscv.vmsgt.nxv2i16( , , - i32); + iXLen); -define @intrinsic_vmsgt_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -380,7 +382,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv2i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -390,9 +392,9 @@ declare @llvm.riscv.vmsgt.mask.nxv2i16( , , , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -406,13 +408,13 @@ entry: %mask = call @llvm.riscv.vmsgt.nxv2i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgt.mask.nxv2i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -420,9 +422,9 @@ entry: declare @llvm.riscv.vmsgt.nxv4i16( , , - i32); + iXLen); -define @intrinsic_vmsgt_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -432,7 +434,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv4i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vmsgt.mask.nxv4i16( , , , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -458,13 +460,13 @@ entry: %mask = call @llvm.riscv.vmsgt.nxv4i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgt.mask.nxv4i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -472,9 +474,9 @@ entry: declare @llvm.riscv.vmsgt.nxv8i16( , , - i32); + iXLen); -define @intrinsic_vmsgt_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e16, m2, ta, ma @@ -484,7 +486,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv8i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -494,9 +496,9 @@ declare @llvm.riscv.vmsgt.mask.nxv8i16( , , , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -510,13 +512,13 @@ entry: %mask = call @llvm.riscv.vmsgt.nxv8i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgt.mask.nxv8i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -524,9 +526,9 @@ entry: declare @llvm.riscv.vmsgt.nxv16i16( , , - i32); + iXLen); -define @intrinsic_vmsgt_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -536,7 +538,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv16i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -546,9 +548,9 @@ declare @llvm.riscv.vmsgt.mask.nxv16i16( , , , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -562,13 +564,13 @@ entry: %mask = call @llvm.riscv.vmsgt.nxv16i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgt.mask.nxv16i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -576,9 +578,9 @@ entry: declare @llvm.riscv.vmsgt.nxv1i32( , , - i32); + iXLen); -define @intrinsic_vmsgt_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -588,7 +590,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv1i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -598,9 +600,9 @@ declare @llvm.riscv.vmsgt.mask.nxv1i32( , , , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -614,13 +616,13 @@ entry: %mask = call @llvm.riscv.vmsgt.nxv1i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgt.mask.nxv1i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -628,9 +630,9 @@ entry: declare @llvm.riscv.vmsgt.nxv2i32( , , - i32); + iXLen); -define @intrinsic_vmsgt_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -640,7 +642,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv2i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -650,9 +652,9 @@ declare @llvm.riscv.vmsgt.mask.nxv2i32( , , , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -666,13 +668,13 @@ entry: %mask = call @llvm.riscv.vmsgt.nxv2i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgt.mask.nxv2i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -680,9 +682,9 @@ entry: declare @llvm.riscv.vmsgt.nxv4i32( , , - i32); + iXLen); -define @intrinsic_vmsgt_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -692,7 +694,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv4i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -702,9 +704,9 @@ declare @llvm.riscv.vmsgt.mask.nxv4i32( , , , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -718,13 +720,13 @@ entry: %mask = call @llvm.riscv.vmsgt.nxv4i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgt.mask.nxv4i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -732,9 +734,9 @@ entry: declare @llvm.riscv.vmsgt.nxv8i32( , , - i32); + iXLen); -define @intrinsic_vmsgt_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -744,7 +746,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv8i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -754,9 +756,9 @@ declare @llvm.riscv.vmsgt.mask.nxv8i32( , , , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -770,13 +772,13 @@ entry: %mask = call @llvm.riscv.vmsgt.nxv8i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgt.mask.nxv8i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -784,9 +786,9 @@ entry: declare @llvm.riscv.vmsgt.nxv1i64( , , - i32); + iXLen); -define @intrinsic_vmsgt_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -796,7 +798,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv1i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vmsgt.mask.nxv1i64( , , , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -822,13 +824,13 @@ entry: %mask = call @llvm.riscv.vmsgt.nxv1i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgt.mask.nxv1i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -836,9 +838,9 @@ entry: declare @llvm.riscv.vmsgt.nxv2i64( , , - i32); + iXLen); -define @intrinsic_vmsgt_vv_nxv2i64_nxv2i64( %0, %1, 
i32 %2) nounwind { +define @intrinsic_vmsgt_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -848,7 +850,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv2i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -858,9 +860,9 @@ declare @llvm.riscv.vmsgt.mask.nxv2i64( , , , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -874,13 +876,13 @@ entry: %mask = call @llvm.riscv.vmsgt.nxv2i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgt.mask.nxv2i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -888,9 +890,9 @@ entry: declare @llvm.riscv.vmsgt.nxv4i64( , , - i32); + iXLen); -define @intrinsic_vmsgt_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -900,7 +902,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv4i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -910,9 +912,9 @@ declare @llvm.riscv.vmsgt.mask.nxv4i64( , , , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -926,13 +928,13 @@ entry: %mask = call @llvm.riscv.vmsgt.nxv4i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgt.mask.nxv4i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -940,9 +942,9 @@ entry: declare @llvm.riscv.vmsgt.nxv1i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsgt_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -952,7 +954,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv1i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -962,9 +964,9 @@ declare @llvm.riscv.vmsgt.mask.nxv1i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -979,7 +981,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -987,9 +989,9 @@ entry: declare @llvm.riscv.vmsgt.nxv2i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsgt_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -999,7 +1001,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv2i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1009,9 +1011,9 @@ declare @llvm.riscv.vmsgt.mask.nxv2i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsgt_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1026,7 +1028,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1034,9 +1036,9 @@ entry: declare @llvm.riscv.vmsgt.nxv4i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsgt_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1046,7 +1048,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv4i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1056,9 +1058,9 @@ declare @llvm.riscv.vmsgt.mask.nxv4i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1073,7 +1075,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1081,9 +1083,9 @@ entry: declare @llvm.riscv.vmsgt.nxv8i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsgt_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -1093,7 +1095,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv8i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1103,9 +1105,9 @@ declare @llvm.riscv.vmsgt.mask.nxv8i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1120,7 +1122,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1128,9 +1130,9 @@ entry: declare @llvm.riscv.vmsgt.nxv16i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsgt_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1140,7 +1142,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv16i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1150,9 +1152,9 @@ declare @llvm.riscv.vmsgt.mask.nxv16i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1167,7 +1169,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1175,9 +1177,9 @@ entry: declare @llvm.riscv.vmsgt.nxv32i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsgt_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -1187,7 +1189,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv32i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1197,9 +1199,9 @@ declare @llvm.riscv.vmsgt.mask.nxv32i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vx_nxv32i8_i8( %0, 
%1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1214,7 +1216,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vmsgt.nxv1i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsgt_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv1i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1244,9 +1246,9 @@ declare @llvm.riscv.vmsgt.mask.nxv1i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1261,7 +1263,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1269,9 +1271,9 @@ entry: declare @llvm.riscv.vmsgt.nxv2i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsgt_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -1281,7 +1283,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv2i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1291,9 +1293,9 @@ declare @llvm.riscv.vmsgt.mask.nxv2i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1308,7 +1310,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1316,9 +1318,9 @@ entry: declare @llvm.riscv.vmsgt.nxv4i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsgt_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1328,7 +1330,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv4i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1338,9 +1340,9 @@ declare @llvm.riscv.vmsgt.mask.nxv4i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1355,7 +1357,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1363,9 +1365,9 @@ entry: declare @llvm.riscv.vmsgt.nxv8i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsgt_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1375,7 +1377,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv8i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1385,9 +1387,9 @@ declare @llvm.riscv.vmsgt.mask.nxv8i16.i16( , i16, , - i32); + 
iXLen); -define @intrinsic_vmsgt_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1402,7 +1404,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1410,9 +1412,9 @@ entry: declare @llvm.riscv.vmsgt.nxv16i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsgt_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -1422,7 +1424,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv16i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1432,9 +1434,9 @@ declare @llvm.riscv.vmsgt.mask.nxv16i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1449,7 +1451,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1457,9 +1459,9 @@ entry: declare @llvm.riscv.vmsgt.nxv1i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsgt_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1469,7 +1471,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv1i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1479,9 +1481,9 @@ declare @llvm.riscv.vmsgt.mask.nxv1i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1496,7 +1498,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1504,9 +1506,9 @@ entry: declare @llvm.riscv.vmsgt.nxv2i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsgt_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1516,7 +1518,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv2i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1526,9 +1528,9 @@ declare @llvm.riscv.vmsgt.mask.nxv2i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1543,7 +1545,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1551,9 +1553,9 @@ entry: declare @llvm.riscv.vmsgt.nxv4i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsgt_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1563,7 +1565,7 @@ entry: %a = call 
@llvm.riscv.vmsgt.nxv4i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1573,9 +1575,9 @@ declare @llvm.riscv.vmsgt.mask.nxv4i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1590,7 +1592,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1598,9 +1600,9 @@ entry: declare @llvm.riscv.vmsgt.nxv8i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsgt_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsgt_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1610,7 +1612,7 @@ entry: %a = call @llvm.riscv.vmsgt.nxv8i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1620,9 +1622,9 @@ declare @llvm.riscv.vmsgt.mask.nxv8i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsgt_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgt_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1637,7 +1639,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1645,25 +1647,31 @@ entry: declare @llvm.riscv.vmsgt.nxv1i64.i64( , i64, - i32); - -define @intrinsic_vmsgt_vx_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vmslt.vv v0, v9, v8 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsgt_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsgt_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vmslt.vv v0, v9, v8 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgt_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsgt.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.nxv1i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1673,30 +1681,39 @@ declare @llvm.riscv.vmsgt.mask.nxv1i64.i64( , i64, , - i32); - -define @intrinsic_vmsgt_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v11, (a0), zero -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vv v10, v11, v8, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsgt_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: 
sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v11, (a0), zero +; RV32-NEXT: vmv1r.v v10, v0 +; RV32-NEXT: vmv1r.v v0, v9 +; RV32-NEXT: vmslt.vv v10, v11, v8, v0.t +; RV32-NEXT: vmv.v.v v0, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v10, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vmv1r.v v0, v9 +; RV64-NEXT: vmsgt.vx v10, v8, a0, v0.t +; RV64-NEXT: vmv.v.v v0, v10 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1704,25 +1721,31 @@ entry: declare @llvm.riscv.vmsgt.nxv2i64.i64( , i64, - i32); - -define @intrinsic_vmsgt_vx_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vmslt.vv v0, v10, v8 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsgt_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsgt_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vmslt.vv v0, v10, v8 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgt_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmsgt.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.nxv2i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1732,30 +1755,39 @@ declare @llvm.riscv.vmsgt.mask.nxv2i64.i64( , i64, , - i32); - -define @intrinsic_vmsgt_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmslt.vv v11, v12, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsgt_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmv1r.v v11, v0 +; RV32-NEXT: vmv1r.v v0, v10 +; RV32-NEXT: vmslt.vv v11, v12, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v11 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v11, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vmv1r.v v0, v10 +; RV64-NEXT: vmsgt.vx v11, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v11 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ 
-1763,25 +1795,31 @@ entry: declare @llvm.riscv.vmsgt.nxv4i64.i64( , i64, - i32); - -define @intrinsic_vmsgt_vx_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmslt.vv v0, v12, v8 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsgt_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsgt_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmslt.vv v0, v12, v8 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgt_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsgt.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.nxv4i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1791,35 +1829,44 @@ declare @llvm.riscv.vmsgt.mask.nxv4i64.i64( , i64, , - i32); - -define @intrinsic_vmsgt_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmslt.vv v13, v16, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsgt_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmv1r.v v13, v0 +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: vmslt.vv v13, v16, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v13 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v13, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: vmsgt.vx v13, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v13 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } -define @intrinsic_vmsgt_vi_nxv1i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsgt_vi_nxv1i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -1829,12 +1876,12 @@ entry: %a = call @llvm.riscv.vmsgt.nxv1i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgt_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgt_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1849,12 +1896,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define 
@intrinsic_vmsgt_vi_nxv2i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsgt_vi_nxv2i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -1864,12 +1911,12 @@ entry: %a = call @llvm.riscv.vmsgt.nxv2i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgt_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgt_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1884,12 +1931,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgt_vi_nxv4i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsgt_vi_nxv4i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -1899,12 +1946,12 @@ entry: %a = call @llvm.riscv.vmsgt.nxv4i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgt_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgt_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1919,12 +1966,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgt_vi_nxv8i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsgt_vi_nxv8i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -1934,12 +1981,12 @@ entry: %a = call @llvm.riscv.vmsgt.nxv8i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgt_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgt_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1954,12 +2001,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgt_vi_nxv16i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsgt_vi_nxv16i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -1969,12 +2016,12 @@ entry: %a = call @llvm.riscv.vmsgt.nxv16i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgt_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgt_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1989,12 +2036,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgt_vi_nxv32i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsgt_vi_nxv32i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -2004,12 +2051,12 @@ entry: %a = call @llvm.riscv.vmsgt.nxv32i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgt_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgt_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2024,12 +2071,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgt_vi_nxv1i16_i16( %0, i32 %1) nounwind { +define 
@intrinsic_vmsgt_vi_nxv1i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -2039,12 +2086,12 @@ entry: %a = call @llvm.riscv.vmsgt.nxv1i16.i16( %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgt_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgt_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2059,12 +2106,12 @@ entry: %1, i16 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgt_vi_nxv2i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsgt_vi_nxv2i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -2074,12 +2121,12 @@ entry: %a = call @llvm.riscv.vmsgt.nxv2i16.i16( %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgt_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgt_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2094,12 +2141,12 @@ entry: %1, i16 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgt_vi_nxv4i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsgt_vi_nxv4i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -2109,12 +2156,12 @@ entry: %a = call @llvm.riscv.vmsgt.nxv4i16.i16( %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgt_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgt_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2129,12 +2176,12 @@ entry: %1, i16 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgt_vi_nxv8i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsgt_vi_nxv8i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -2144,12 +2191,12 @@ entry: %a = call @llvm.riscv.vmsgt.nxv8i16.i16( %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgt_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgt_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2164,12 +2211,12 @@ entry: %1, i16 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgt_vi_nxv16i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsgt_vi_nxv16i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -2179,12 +2226,12 @@ entry: %a = call @llvm.riscv.vmsgt.nxv16i16.i16( %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgt_mask_vi_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgt_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2199,12 +2246,12 @@ entry: %1, i16 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgt_vi_nxv1i32_i32( %0, i32 %1) nounwind { +define 
@intrinsic_vmsgt_vi_nxv1i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -2214,12 +2261,12 @@ entry: %a = call @llvm.riscv.vmsgt.nxv1i32.i32( %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgt_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgt_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2234,12 +2281,12 @@ entry: %1, i32 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgt_vi_nxv2i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsgt_vi_nxv2i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -2249,12 +2296,12 @@ entry: %a = call @llvm.riscv.vmsgt.nxv2i32.i32( %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgt_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgt_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2269,12 +2316,12 @@ entry: %1, i32 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgt_vi_nxv4i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsgt_vi_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -2284,12 +2331,12 @@ entry: %a = call @llvm.riscv.vmsgt.nxv4i32.i32( %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgt_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgt_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2304,12 +2351,12 @@ entry: %1, i32 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgt_vi_nxv8i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsgt_vi_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -2319,12 +2366,12 @@ entry: %a = call @llvm.riscv.vmsgt.nxv8i32.i32( %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgt_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgt_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2339,12 +2386,12 @@ entry: %1, i32 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgt_vi_nxv1i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmsgt_vi_nxv1i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -2354,12 +2401,12 @@ entry: %a = call @llvm.riscv.vmsgt.nxv1i64.i64( %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgt_mask_vi_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgt_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2374,12 +2421,12 @@ entry: %1, i64 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgt_vi_nxv2i64_i64( %0, i32 %1) nounwind { +define 
@intrinsic_vmsgt_vi_nxv2i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -2389,12 +2436,12 @@ entry: %a = call @llvm.riscv.vmsgt.nxv2i64.i64( %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgt_mask_vi_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgt_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2409,12 +2456,12 @@ entry: %1, i64 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgt_vi_nxv4i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmsgt_vi_nxv4i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -2424,12 +2471,12 @@ entry: %a = call @llvm.riscv.vmsgt.nxv4i64.i64( %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgt_mask_vi_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgt_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2444,7 +2491,7 @@ entry: %1, i64 9, %2, - i32 %3) + iXLen %3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll deleted file mode 100644 index 049450e..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll +++ /dev/null @@ -1,2414 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmsgtu.nxv1i8( - , - , - i64); - -define @intrinsic_vmsgtu_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv1i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv1i8( - , - , - , - , - i64); - -define @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmsltu.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgtu.nxv1i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgtu.mask.nxv1i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv2i8( - , - , - i64); - -define @intrinsic_vmsgtu_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv2i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv2i8( - , - , - , - , - i64); - -define @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmsltu.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 
-; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgtu.nxv2i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgtu.mask.nxv2i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv4i8( - , - , - i64); - -define @intrinsic_vmsgtu_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv4i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv4i8( - , - , - , - , - i64); - -define @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmsltu.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgtu.nxv4i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgtu.mask.nxv4i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv8i8( - , - , - i64); - -define @intrinsic_vmsgtu_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv8i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv8i8( - , - , - , - , - i64); - -define @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmsltu.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgtu.nxv8i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgtu.mask.nxv8i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv16i8( - , - , - i64); - -define @intrinsic_vmsgtu_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v10, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv16i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv16i8( - , - , - , - , - i64); - -define @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmsltu.vv v14, v10, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsltu.vv v8, v12, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgtu.nxv16i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgtu.mask.nxv16i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv32i8( - , - , - i64); - -define @intrinsic_vmsgtu_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vmsgtu_vv_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v12, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv32i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv32i8( - , - , - , - , - i64); - -define @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmsltu.vv v20, v12, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsltu.vv v8, v16, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgtu.nxv32i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgtu.mask.nxv32i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv1i16( - , - , - i64); - -define @intrinsic_vmsgtu_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv1i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv1i16( - , - , - , - , - i64); - -define @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmsltu.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgtu.nxv1i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgtu.mask.nxv1i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv2i16( - , - , - i64); - -define @intrinsic_vmsgtu_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv2i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv2i16( - , - , - , - , - i64); - -define @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmsltu.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgtu.nxv2i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgtu.mask.nxv2i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv4i16( - , - , - i64); - -define @intrinsic_vmsgtu_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv4i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv4i16( - , - , - , - , - i64); - -define @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) 
nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmsltu.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgtu.nxv4i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgtu.mask.nxv4i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv8i16( - , - , - i64); - -define @intrinsic_vmsgtu_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v10, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv8i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv8i16( - , - , - , - , - i64); - -define @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmsltu.vv v14, v10, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsltu.vv v8, v12, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgtu.nxv8i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgtu.mask.nxv8i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv16i16( - , - , - i64); - -define @intrinsic_vmsgtu_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v12, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv16i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv16i16( - , - , - , - , - i64); - -define @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmsltu.vv v20, v12, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsltu.vv v8, v16, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgtu.nxv16i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgtu.mask.nxv16i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv1i32( - , - , - i64); - -define @intrinsic_vmsgtu_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv1i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv1i32( - , - , - , - , - i64); - -define @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmsltu.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call 
@llvm.riscv.vmsgtu.nxv1i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgtu.mask.nxv1i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv2i32( - , - , - i64); - -define @intrinsic_vmsgtu_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv2i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv2i32( - , - , - , - , - i64); - -define @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmsltu.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgtu.nxv2i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgtu.mask.nxv2i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv4i32( - , - , - i64); - -define @intrinsic_vmsgtu_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v10, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv4i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv4i32( - , - , - , - , - i64); - -define @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmsltu.vv v14, v10, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsltu.vv v8, v12, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgtu.nxv4i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgtu.mask.nxv4i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv8i32( - , - , - i64); - -define @intrinsic_vmsgtu_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v12, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv8i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv8i32( - , - , - , - , - i64); - -define @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmsltu.vv v20, v12, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsltu.vv v8, v16, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgtu.nxv8i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgtu.mask.nxv8i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv1i64( - , - , - i64); - -define @intrinsic_vmsgtu_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, 
ta, ma -; CHECK-NEXT: vmsltu.vv v0, v9, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv1i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv1i64( - , - , - , - , - i64); - -define @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmsltu.vv v8, v9, v8 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgtu.nxv1i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgtu.mask.nxv1i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv2i64( - , - , - i64); - -define @intrinsic_vmsgtu_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v10, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv2i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv2i64( - , - , - , - , - i64); - -define @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmsltu.vv v14, v10, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsltu.vv v8, v12, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgtu.nxv2i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgtu.mask.nxv2i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv4i64( - , - , - i64); - -define @intrinsic_vmsgtu_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v12, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv4i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv4i64( - , - , - , - , - i64); - -define @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmsltu.vv v20, v12, v8 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsltu.vv v8, v16, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsgtu.nxv4i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsgtu.mask.nxv4i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv1i8.i8( - , - i8, - i64); - -define @intrinsic_vmsgtu_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vmsgtu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv1i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv1i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsgtu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv2i8.i8( - , - i8, - i64); - -define @intrinsic_vmsgtu_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vmsgtu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv2i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv2i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsgtu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv4i8.i8( - , - i8, - i64); - -define @intrinsic_vmsgtu_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vmsgtu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv4i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv4i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsgtu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv8i8.i8( - , - i8, - i64); - -define @intrinsic_vmsgtu_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vmsgtu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv8i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv8i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsgtu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv16i8.i8( - , - i8, - i64); - -define @intrinsic_vmsgtu_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vmsgtu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv16i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare 
@llvm.riscv.vmsgtu.mask.nxv16i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsgtu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgtu.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv32i8.i8( - , - i8, - i64); - -define @intrinsic_vmsgtu_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vmsgtu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv32i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv32i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsgtu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgtu.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv1i16.i16( - , - i16, - i64); - -define @intrinsic_vmsgtu_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vmsgtu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv1i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv1i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsgtu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv2i16.i16( - , - i16, - i64); - -define @intrinsic_vmsgtu_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vmsgtu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv2i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv2i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsgtu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv4i16.i16( - , - i16, - i64); - -define @intrinsic_vmsgtu_vx_nxv4i16_i16( %0, i16 %1, i64 %2) 
nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vmsgtu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv4i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv4i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsgtu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv8i16.i16( - , - i16, - i64); - -define @intrinsic_vmsgtu_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vmsgtu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv8i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv8i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsgtu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgtu.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv16i16.i16( - , - i16, - i64); - -define @intrinsic_vmsgtu_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vmsgtu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv16i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv16i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsgtu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgtu.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv1i32.i32( - , - i32, - i64); - -define @intrinsic_vmsgtu_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vmsgtu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv1i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv1i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsgtu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; 
CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv2i32.i32( - , - i32, - i64); - -define @intrinsic_vmsgtu_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vmsgtu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv2i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv2i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsgtu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv4i32.i32( - , - i32, - i64); - -define @intrinsic_vmsgtu_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vmsgtu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv4i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv4i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsgtu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgtu.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv8i32.i32( - , - i32, - i64); - -define @intrinsic_vmsgtu_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vmsgtu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv8i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv8i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsgtu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgtu.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv1i64.i64( - , - i64, - i64); - -define @intrinsic_vmsgtu_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vmsgtu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv1i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare 
@llvm.riscv.vmsgtu.mask.nxv1i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmsgtu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv2i64.i64( - , - i64, - i64); - -define @intrinsic_vmsgtu_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vmsgtu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv2i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv2i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmsgtu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgtu.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsgtu.nxv4i64.i64( - , - i64, - i64); - -define @intrinsic_vmsgtu_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vmsgtu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv4i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsgtu.mask.nxv4i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmsgtu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgtu.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -define @intrinsic_vmsgtu_vi_nxv1i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv1i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgtu_mask_vi_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv1i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgtu_vi_nxv2i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vmsgtu.nxv2i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgtu_mask_vi_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv2i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgtu_vi_nxv4i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv4i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgtu_mask_vi_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv4i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgtu_vi_nxv8i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv8i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgtu_mask_vi_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgtu_vi_nxv16i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv16i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgtu_mask_vi_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv16i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgtu_vi_nxv32i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv32i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgtu_mask_vi_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgtu.vi 
v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv32i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgtu_vi_nxv1i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv1i16.i16( - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgtu_mask_vi_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv1i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgtu_vi_nxv2i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv2i16.i16( - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgtu_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv2i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgtu_vi_nxv4i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv4i16.i16( - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgtu_mask_vi_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv4i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgtu_vi_nxv8i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv8i16.i16( - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgtu_mask_vi_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv8i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgtu_vi_nxv16i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i16_i16: -; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv16i16.i16( - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgtu_mask_vi_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv16i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgtu_vi_nxv1i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv1i32.i32( - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgtu_mask_vi_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgtu_vi_nxv2i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv2i32.i32( - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgtu_mask_vi_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgtu_vi_nxv4i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv4i32.i32( - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgtu_mask_vi_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv4i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgtu_vi_nxv8i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv8i32.i32( - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgtu_mask_vi_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vmsgtu_mask_vi_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv8i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgtu_vi_nxv1i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv1i64.i64( - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgtu_mask_vi_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv1i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgtu_vi_nxv2i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv2i64.i64( - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgtu_mask_vi_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv2i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsgtu_vi_nxv4i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmsgtu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.nxv4i64.i64( - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsgtu_mask_vi_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsgtu.mask.nxv4i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll similarity index 85% rename from llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll index cec42c1..3416060 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: 
-verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vmsgtu.nxv1i8( , , - i32); + iXLen); -define @intrinsic_vmsgtu_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv1i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -26,9 +28,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv1i8( , , , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -42,13 +44,13 @@ entry: %mask = call @llvm.riscv.vmsgtu.nxv1i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgtu.mask.nxv1i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -56,9 +58,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv2i8( , , - i32); + iXLen); -define @intrinsic_vmsgtu_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -68,7 +70,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv2i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -78,9 +80,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv2i8( , , , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -94,13 +96,13 @@ entry: %mask = call @llvm.riscv.vmsgtu.nxv2i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgtu.mask.nxv2i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -108,9 +110,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv4i8( , , - i32); + iXLen); -define @intrinsic_vmsgtu_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -120,7 +122,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv4i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -130,9 +132,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv4i8( , , , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -146,13 +148,13 @@ entry: %mask = call @llvm.riscv.vmsgtu.nxv4i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgtu.mask.nxv4i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -160,9 +162,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv8i8( , , - i32); + iXLen); -define @intrinsic_vmsgtu_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -172,7 +174,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv8i8( %0, %1, - i32 %2) + 
iXLen %2) ret %a } @@ -182,9 +184,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv8i8( , , , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -198,13 +200,13 @@ entry: %mask = call @llvm.riscv.vmsgtu.nxv8i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgtu.mask.nxv8i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -212,9 +214,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv16i8( , , - i32); + iXLen); -define @intrinsic_vmsgtu_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -224,7 +226,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv16i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -234,9 +236,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv16i8( , , , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -250,13 +252,13 @@ entry: %mask = call @llvm.riscv.vmsgtu.nxv16i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgtu.mask.nxv16i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -264,9 +266,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv32i8( , , - i32); + iXLen); -define @intrinsic_vmsgtu_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -276,7 +278,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv32i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -286,9 +288,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv32i8( , , , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -302,13 +304,13 @@ entry: %mask = call @llvm.riscv.vmsgtu.nxv32i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgtu.mask.nxv32i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -316,9 +318,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv1i16( , , - i32); + iXLen); -define @intrinsic_vmsgtu_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -328,7 +330,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv1i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -338,9 +340,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv1i16( , , , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -354,13 
+356,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -368,9 +370,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -380,7 +382,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -390,9 +392,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -406,13 +408,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -420,9 +422,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -432,7 +434,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -458,13 +460,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -472,9 +474,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -484,7 +486,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -494,9 +496,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -510,13 +512,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -524,9 +526,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -546,9 +548,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -562,13 +564,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
     <vscale x 16 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -576,9 +578,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -588,7 +590,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -598,9 +600,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -614,13 +616,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -628,9 +630,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -640,7 +642,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -650,9 +652,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -666,13 +668,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -680,9 +682,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -692,7 +694,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -702,9 +704,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3,
i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -718,13 +720,13 @@ entry: %mask = call @llvm.riscv.vmsgtu.nxv4i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgtu.mask.nxv4i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -732,9 +734,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv8i32( , , - i32); + iXLen); -define @intrinsic_vmsgtu_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -744,7 +746,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv8i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -754,9 +756,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv8i32( , , , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -770,13 +772,13 @@ entry: %mask = call @llvm.riscv.vmsgtu.nxv8i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgtu.mask.nxv8i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -784,9 +786,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv1i64( , , - i32); + iXLen); -define @intrinsic_vmsgtu_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -796,7 +798,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv1i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv1i64( , , , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -822,13 +824,13 @@ entry: %mask = call @llvm.riscv.vmsgtu.nxv1i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgtu.mask.nxv1i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -836,9 +838,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv2i64( , , - i32); + iXLen); -define @intrinsic_vmsgtu_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -848,7 +850,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv2i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -858,9 +860,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv2i64( , , , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -874,13 +876,13 @@ entry: %mask = call @llvm.riscv.vmsgtu.nxv2i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgtu.mask.nxv2i64( %0, %2, %3, %mask, - i32 %4) 
+ iXLen %4) ret %a } @@ -888,9 +890,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv4i64( , , - i32); + iXLen); -define @intrinsic_vmsgtu_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -900,7 +902,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv4i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -910,9 +912,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv4i64( , , , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -926,13 +928,13 @@ entry: %mask = call @llvm.riscv.vmsgtu.nxv4i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsgtu.mask.nxv4i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -940,9 +942,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv1i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsgtu_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -952,7 +954,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv1i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -962,9 +964,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv1i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -979,7 +981,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -987,9 +989,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv2i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsgtu_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -999,7 +1001,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv2i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1009,9 +1011,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv2i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1026,7 +1028,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1034,9 +1036,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv4i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsgtu_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1046,7 +1048,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv4i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1056,9 +1058,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv4i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define 
@intrinsic_vmsgtu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1073,7 +1075,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1081,9 +1083,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv8i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsgtu_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -1093,7 +1095,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv8i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1103,9 +1105,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv8i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1120,7 +1122,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1128,9 +1130,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv16i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsgtu_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1140,7 +1142,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv16i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1150,9 +1152,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv16i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1167,7 +1169,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1175,9 +1177,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv32i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsgtu_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -1187,7 +1189,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv32i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1197,9 +1199,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv32i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1214,7 +1216,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv1i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsgtu_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv1i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1244,9 +1246,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv1i16.i16( , i16, , - 
i32); + iXLen); -define @intrinsic_vmsgtu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1261,7 +1263,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1269,9 +1271,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv2i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsgtu_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -1281,7 +1283,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv2i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1291,9 +1293,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv2i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1308,7 +1310,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1316,9 +1318,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv4i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsgtu_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1328,7 +1330,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv4i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1338,9 +1340,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv4i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1355,7 +1357,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1363,9 +1365,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv8i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsgtu_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1375,7 +1377,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv8i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1385,9 +1387,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv8i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1402,7 +1404,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1410,9 +1412,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv16i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsgtu_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ 
-1422,7 +1424,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv16i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1432,9 +1434,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv16i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1449,7 +1451,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1457,9 +1459,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv1i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsgtu_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1469,7 +1471,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv1i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1479,9 +1481,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv1i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1496,7 +1498,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1504,9 +1506,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv2i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsgtu_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1516,7 +1518,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv2i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1526,9 +1528,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv2i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1543,7 +1545,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1551,9 +1553,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv4i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsgtu_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsgtu_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1563,7 +1565,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv4i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1573,9 +1575,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv4i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1590,7 +1592,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1598,9 +1600,9 @@ entry: declare @llvm.riscv.vmsgtu.nxv8i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsgtu_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +define 
@intrinsic_vmsgtu_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1610,7 +1612,7 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv8i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1620,9 +1622,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv8i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsgtu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsgtu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1637,7 +1639,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1645,25 +1647,31 @@ entry: declare @llvm.riscv.vmsgtu.nxv1i64.i64( , i64, - i32); - -define @intrinsic_vmsgtu_vx_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vmsltu.vv v0, v9, v8 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsgtu_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsgtu_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vmsltu.vv v0, v9, v8 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgtu_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsgtu.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.nxv1i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1673,30 +1681,39 @@ declare @llvm.riscv.vmsgtu.mask.nxv1i64.i64( , i64, , - i32); - -define @intrinsic_vmsgtu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v11, (a0), zero -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vv v10, v11, v8, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsgtu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v11, (a0), zero +; RV32-NEXT: vmv1r.v v10, v0 +; RV32-NEXT: vmv1r.v v0, v9 +; RV32-NEXT: vmsltu.vv v10, v11, v8, v0.t +; RV32-NEXT: vmv.v.v v0, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v10, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vmv1r.v v0, v9 +; RV64-NEXT: vmsgtu.vx v10, v8, a0, v0.t +; RV64-NEXT: vmv.v.v v0, v10 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.mask.nxv1i64.i64( 
%0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1704,25 +1721,31 @@ entry: declare @llvm.riscv.vmsgtu.nxv2i64.i64( , i64, - i32); - -define @intrinsic_vmsgtu_vx_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vmsltu.vv v0, v10, v8 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsgtu_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsgtu_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vmsltu.vv v0, v10, v8 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgtu_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmsgtu.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.nxv2i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1732,30 +1755,39 @@ declare @llvm.riscv.vmsgtu.mask.nxv2i64.i64( , i64, , - i32); - -define @intrinsic_vmsgtu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsltu.vv v11, v12, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsgtu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmv1r.v v11, v0 +; RV32-NEXT: vmv1r.v v0, v10 +; RV32-NEXT: vmsltu.vv v11, v12, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v11 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v11, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vmv1r.v v0, v10 +; RV64-NEXT: vmsgtu.vx v11, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v11 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1763,25 +1795,31 @@ entry: declare @llvm.riscv.vmsgtu.nxv4i64.i64( , i64, - i32); - -define @intrinsic_vmsgtu_vx_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmsltu.vv v0, v12, v8 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsgtu_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: 
intrinsic_vmsgtu_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmsltu.vv v0, v12, v8 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgtu_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsgtu.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.nxv4i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1791,35 +1829,44 @@ declare @llvm.riscv.vmsgtu.mask.nxv4i64.i64( , i64, , - i32); - -define @intrinsic_vmsgtu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsltu.vv v13, v16, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsgtu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmv1r.v v13, v0 +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: vmsltu.vv v13, v16, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v13 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v13, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: vmsgtu.vx v13, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v13 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } -define @intrinsic_vmsgtu_vi_nxv1i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsgtu_vi_nxv1i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -1829,12 +1876,12 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv1i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgtu_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgtu_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1849,12 +1896,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgtu_vi_nxv2i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsgtu_vi_nxv2i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -1864,12 +1911,12 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv2i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgtu_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgtu_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1884,12 
+1931,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgtu_vi_nxv4i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsgtu_vi_nxv4i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -1899,12 +1946,12 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv4i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgtu_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgtu_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1919,12 +1966,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgtu_vi_nxv8i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsgtu_vi_nxv8i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -1934,12 +1981,12 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv8i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgtu_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgtu_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1954,12 +2001,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgtu_vi_nxv16i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsgtu_vi_nxv16i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -1969,12 +2016,12 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv16i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgtu_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgtu_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1989,12 +2036,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgtu_vi_nxv32i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsgtu_vi_nxv32i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -2004,12 +2051,12 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv32i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgtu_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgtu_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2024,12 +2071,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgtu_vi_nxv1i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsgtu_vi_nxv1i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -2039,12 +2086,12 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv1i16.i16( %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgtu_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgtu_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2059,12 +2106,12 @@ entry: %1, i16 9, 
%2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgtu_vi_nxv2i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsgtu_vi_nxv2i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -2074,12 +2121,12 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv2i16.i16( %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgtu_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgtu_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2094,12 +2141,12 @@ entry: %1, i16 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgtu_vi_nxv4i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsgtu_vi_nxv4i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -2109,12 +2156,12 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv4i16.i16( %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgtu_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgtu_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2129,12 +2176,12 @@ entry: %1, i16 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgtu_vi_nxv8i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsgtu_vi_nxv8i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -2144,12 +2191,12 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv8i16.i16( %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgtu_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgtu_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2164,12 +2211,12 @@ entry: %1, i16 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgtu_vi_nxv16i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsgtu_vi_nxv16i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -2179,12 +2226,12 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv16i16.i16( %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgtu_mask_vi_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgtu_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2199,12 +2246,12 @@ entry: %1, i16 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgtu_vi_nxv1i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsgtu_vi_nxv1i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -2214,12 +2261,12 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv1i32.i32( %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgtu_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgtu_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2234,12 
+2281,12 @@ entry: %1, i32 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgtu_vi_nxv2i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsgtu_vi_nxv2i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -2249,12 +2296,12 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv2i32.i32( %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgtu_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgtu_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2269,12 +2316,12 @@ entry: %1, i32 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgtu_vi_nxv4i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsgtu_vi_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -2284,12 +2331,12 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv4i32.i32( %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgtu_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgtu_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2304,12 +2351,12 @@ entry: %1, i32 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgtu_vi_nxv8i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsgtu_vi_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -2319,12 +2366,12 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv8i32.i32( %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgtu_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgtu_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2339,12 +2386,12 @@ entry: %1, i32 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgtu_vi_nxv1i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmsgtu_vi_nxv1i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -2354,12 +2401,12 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv1i64.i64( %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgtu_mask_vi_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgtu_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2374,12 +2421,12 @@ entry: %1, i64 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgtu_vi_nxv2i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmsgtu_vi_nxv2i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -2389,12 +2436,12 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv2i64.i64( %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgtu_mask_vi_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgtu_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v 
v11, v0 @@ -2409,12 +2456,12 @@ entry: %1, i64 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsgtu_vi_nxv4i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmsgtu_vi_nxv4i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -2424,12 +2471,12 @@ entry: %a = call @llvm.riscv.vmsgtu.nxv4i64.i64( %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsgtu_mask_vi_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsgtu_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2444,7 +2491,7 @@ entry: %1, i64 9, %2, - i32 %3) + iXLen %3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll deleted file mode 100644 index 2d2e261..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll +++ /dev/null @@ -1,2450 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmsle.nxv1i8( - , - , - i32); - -define @intrinsic_vmsle_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmsle.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv1i8( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv1i8( - , - , - , - , - i32); - -define @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmsle.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsle.nxv1i8( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsle.mask.nxv1i8( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv2i8( - , - , - i32); - -define @intrinsic_vmsle_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmsle.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv2i8( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv2i8( - , - , - , - , - i32); - -define @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmsle.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsle.nxv2i8( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsle.mask.nxv2i8( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv4i8( - , - , - i32); - -define @intrinsic_vmsle_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmsle.vv v0, v8, v9 -; CHECK-NEXT: ret 
-entry: - %a = call @llvm.riscv.vmsle.nxv4i8( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv4i8( - , - , - , - , - i32); - -define @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmsle.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsle.nxv4i8( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsle.mask.nxv4i8( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv8i8( - , - , - i32); - -define @intrinsic_vmsle_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmsle.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv8i8( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv8i8( - , - , - , - , - i32); - -define @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmsle.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsle.nxv8i8( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsle.mask.nxv8i8( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv16i8( - , - , - i32); - -define @intrinsic_vmsle_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmsle.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv16i8( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv16i8( - , - , - , - , - i32); - -define @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmsle.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsle.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsle.nxv16i8( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsle.mask.nxv16i8( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv32i8( - , - , - i32); - -define @intrinsic_vmsle_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vv_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmsle.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv32i8( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv32i8( - , - , - , - , - i32); - -define @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmsle.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, 
v20 -; CHECK-NEXT: vmsle.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsle.nxv32i8( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsle.mask.nxv32i8( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv1i16( - , - , - i32); - -define @intrinsic_vmsle_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmsle.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv1i16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv1i16( - , - , - , - , - i32); - -define @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmsle.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsle.nxv1i16( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsle.mask.nxv1i16( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv2i16( - , - , - i32); - -define @intrinsic_vmsle_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmsle.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv2i16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv2i16( - , - , - , - , - i32); - -define @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmsle.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsle.nxv2i16( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsle.mask.nxv2i16( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv4i16( - , - , - i32); - -define @intrinsic_vmsle_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmsle.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv4i16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv4i16( - , - , - , - , - i32); - -define @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmsle.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsle.nxv4i16( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsle.mask.nxv4i16( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv8i16( - , - , - i32); - -define @intrinsic_vmsle_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vmsle_vv_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmsle.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv8i16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv8i16( - , - , - , - , - i32); - -define @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmsle.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsle.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsle.nxv8i16( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsle.mask.nxv8i16( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv16i16( - , - , - i32); - -define @intrinsic_vmsle_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmsle.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv16i16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv16i16( - , - , - , - , - i32); - -define @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmsle.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsle.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsle.nxv16i16( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsle.mask.nxv16i16( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv1i32( - , - , - i32); - -define @intrinsic_vmsle_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmsle.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv1i32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv1i32( - , - , - , - , - i32); - -define @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmsle.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsle.nxv1i32( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsle.mask.nxv1i32( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv2i32( - , - , - i32); - -define @intrinsic_vmsle_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmsle.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv2i32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv2i32( - , - , - , - , - i32); - -define @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmsle.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsle.nxv2i32( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsle.mask.nxv2i32( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv4i32( - , - , - i32); - -define @intrinsic_vmsle_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmsle.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv4i32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv4i32( - , - , - , - , - i32); - -define @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmsle.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsle.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsle.nxv4i32( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsle.mask.nxv4i32( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv8i32( - , - , - i32); - -define @intrinsic_vmsle_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmsle.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv8i32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv8i32( - , - , - , - , - i32); - -define @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmsle.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsle.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsle.nxv8i32( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsle.mask.nxv8i32( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv1i64( - , - , - i32); - -define @intrinsic_vmsle_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmsle.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv1i64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv1i64( - , - , - , - , - i32); - -define @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmsle.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsle.nxv1i64( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsle.mask.nxv1i64( - %0, - %2, - 
%3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv2i64( - , - , - i32); - -define @intrinsic_vmsle_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmsle.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv2i64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv2i64( - , - , - , - , - i32); - -define @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmsle.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsle.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsle.nxv2i64( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsle.mask.nxv2i64( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv4i64( - , - , - i32); - -define @intrinsic_vmsle_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmsle.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv4i64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv4i64( - , - , - , - , - i32); - -define @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmsle.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsle.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsle.nxv4i64( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsle.mask.nxv4i64( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv1i8.i8( - , - i8, - i32); - -define @intrinsic_vmsle_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vmsle.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv1i8.i8( - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv1i8.i8( - , - , - i8, - , - i32); - -define @intrinsic_vmsle_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv2i8.i8( - , - i8, - i32); - -define @intrinsic_vmsle_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vmsle.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv2i8.i8( - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv2i8.i8( - , - , - i8, - , - i32); - -define 
@intrinsic_vmsle_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv4i8.i8( - , - i8, - i32); - -define @intrinsic_vmsle_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vmsle.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv4i8.i8( - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv4i8.i8( - , - , - i8, - , - i32); - -define @intrinsic_vmsle_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv8i8.i8( - , - i8, - i32); - -define @intrinsic_vmsle_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vmsle.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv8i8.i8( - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv8i8.i8( - , - , - i8, - , - i32); - -define @intrinsic_vmsle_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv16i8.i8( - , - i8, - i32); - -define @intrinsic_vmsle_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vmsle.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv16i8.i8( - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv16i8.i8( - , - , - i8, - , - i32); - -define @intrinsic_vmsle_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsle.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv32i8.i8( - , - i8, - i32); - -define @intrinsic_vmsle_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vmsle.vx v0, 
v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv32i8.i8( - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv32i8.i8( - , - , - i8, - , - i32); - -define @intrinsic_vmsle_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsle.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv1i16.i16( - , - i16, - i32); - -define @intrinsic_vmsle_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vmsle.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv1i16.i16( - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv1i16.i16( - , - , - i16, - , - i32); - -define @intrinsic_vmsle_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv2i16.i16( - , - i16, - i32); - -define @intrinsic_vmsle_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vmsle.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv2i16.i16( - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv2i16.i16( - , - , - i16, - , - i32); - -define @intrinsic_vmsle_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv4i16.i16( - , - i16, - i32); - -define @intrinsic_vmsle_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vmsle.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv4i16.i16( - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv4i16.i16( - , - , - i16, - , - i32); - -define @intrinsic_vmsle_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4) - - ret %a -} - -declare 
@llvm.riscv.vmsle.nxv8i16.i16( - , - i16, - i32); - -define @intrinsic_vmsle_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vmsle.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv8i16.i16( - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv8i16.i16( - , - , - i16, - , - i32); - -define @intrinsic_vmsle_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsle.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv16i16.i16( - , - i16, - i32); - -define @intrinsic_vmsle_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vmsle.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv16i16.i16( - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv16i16.i16( - , - , - i16, - , - i32); - -define @intrinsic_vmsle_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsle.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv1i32.i32( - , - i32, - i32); - -define @intrinsic_vmsle_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vmsle.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv1i32.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv1i32.i32( - , - , - i32, - , - i32); - -define @intrinsic_vmsle_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv2i32.i32( - , - i32, - i32); - -define @intrinsic_vmsle_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vmsle.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv2i32.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv2i32.i32( - , - , - i32, - , - i32); - -define @intrinsic_vmsle_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; 
CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv4i32.i32( - , - i32, - i32); - -define @intrinsic_vmsle_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vmsle.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv4i32.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv4i32.i32( - , - , - i32, - , - i32); - -define @intrinsic_vmsle_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsle.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv8i32.i32( - , - i32, - i32); - -define @intrinsic_vmsle_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vmsle.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv8i32.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv8i32.i32( - , - , - i32, - , - i32); - -define @intrinsic_vmsle_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsle.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv1i64.i64( - , - i64, - i32); - -define @intrinsic_vmsle_vx_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vmsle.vv v0, v8, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv1i64.i64( - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv1i64.i64( - , - , - i64, - , - i32); - -define @intrinsic_vmsle_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v11, (a0), zero -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vv v10, v8, v11, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4) - - ret %a -} - -declare 
@llvm.riscv.vmsle.nxv2i64.i64( - , - i64, - i32); - -define @intrinsic_vmsle_vx_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vmsle.vv v0, v8, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv2i64.i64( - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv2i64.i64( - , - , - i64, - , - i32); - -define @intrinsic_vmsle_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsle.vv v11, v8, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsle.nxv4i64.i64( - , - i64, - i32); - -define @intrinsic_vmsle_vx_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmsle.vv v0, v8, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv4i64.i64( - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsle.mask.nxv4i64.i64( - , - , - i64, - , - i32); - -define @intrinsic_vmsle_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsle.vv v13, v8, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4) - - ret %a -} - -define @intrinsic_vmsle_vi_nxv1i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv1i8.i8( - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsle_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv1i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsle_vi_nxv2i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vmsle_vi_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv2i8.i8( - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsle_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv2i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsle_vi_nxv4i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv4i8.i8( - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsle_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv4i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsle_vi_nxv8i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv8i8.i8( - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsle_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv8i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsle_vi_nxv16i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vi_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv16i8.i8( - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsle_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsle.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv16i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsle_vi_nxv32i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vi_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv32i8.i8( - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsle_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsle.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv32i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsle_vi_nxv1i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv1i16.i16( - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsle_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv1i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsle_vi_nxv2i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv2i16.i16( - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsle_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv2i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsle_vi_nxv4i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv4i16.i16( - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsle_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv4i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsle_vi_nxv8i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv8i16.i16( - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsle_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsle.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv8i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3) - - ret %a -} - -define 
@intrinsic_vmsle_vi_nxv16i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vi_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv16i16.i16( - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsle_mask_vi_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsle.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv16i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsle_vi_nxv1i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv1i32.i32( - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsle_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv1i32.i32( - %0, - %1, - i32 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsle_vi_nxv2i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv2i32.i32( - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsle_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv2i32.i32( - %0, - %1, - i32 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsle_vi_nxv4i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv4i32.i32( - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsle_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsle.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv4i32.i32( - %0, - %1, - i32 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsle_vi_nxv8i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv8i32.i32( - %0, - i32 9, - i32 %1) - - ret %a -} - -define 
@intrinsic_vmsle_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsle.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv8i32.i32( - %0, - %1, - i32 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsle_vi_nxv1i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv1i64.i64( - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsle_mask_vi_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv1i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsle_vi_nxv2i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv2i64.i64( - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsle_mask_vi_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsle.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv2i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsle_vi_nxv4i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.nxv4i64.i64( - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsle_mask_vi_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsle.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsle.mask.nxv4i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle.ll similarity index 84% rename from llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll rename to llvm/test/CodeGen/RISCV/rvv/vmsle.ll index 58f33dc..1a02131 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsle.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 
's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vmsle.nxv1i8( , , - i64); + iXLen); -define @intrinsic_vmsle_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsle_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv1i8( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -26,9 +28,9 @@ declare @llvm.riscv.vmsle.mask.nxv1i8( , , , - i64); + iXLen); -define @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -42,13 +44,13 @@ entry: %mask = call @llvm.riscv.vmsle.nxv1i8( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsle.mask.nxv1i8( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -56,9 +58,9 @@ entry: declare @llvm.riscv.vmsle.nxv2i8( , , - i64); + iXLen); -define @intrinsic_vmsle_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsle_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -68,7 +70,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv2i8( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -78,9 +80,9 @@ declare @llvm.riscv.vmsle.mask.nxv2i8( , , , - i64); + iXLen); -define @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -94,13 +96,13 @@ entry: %mask = call @llvm.riscv.vmsle.nxv2i8( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsle.mask.nxv2i8( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -108,9 +110,9 @@ entry: declare @llvm.riscv.vmsle.nxv4i8( , , - i64); + iXLen); -define @intrinsic_vmsle_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsle_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -120,7 +122,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv4i8( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -130,9 +132,9 @@ declare @llvm.riscv.vmsle.mask.nxv4i8( , , , - i64); + iXLen); -define @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -146,13 +148,13 @@ entry: %mask = call @llvm.riscv.vmsle.nxv4i8( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsle.mask.nxv4i8( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -160,9 +162,9 @@ entry: declare @llvm.riscv.vmsle.nxv8i8( , , - i64); + iXLen); -define @intrinsic_vmsle_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsle_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -172,7 +174,7 @@ entry: %a = call 
@llvm.riscv.vmsle.nxv8i8( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -182,9 +184,9 @@ declare @llvm.riscv.vmsle.mask.nxv8i8( , , , - i64); + iXLen); -define @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -198,13 +200,13 @@ entry: %mask = call @llvm.riscv.vmsle.nxv8i8( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsle.mask.nxv8i8( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -212,9 +214,9 @@ entry: declare @llvm.riscv.vmsle.nxv16i8( , , - i64); + iXLen); -define @intrinsic_vmsle_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsle_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -224,7 +226,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv16i8( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -234,9 +236,9 @@ declare @llvm.riscv.vmsle.mask.nxv16i8( , , , - i64); + iXLen); -define @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -250,13 +252,13 @@ entry: %mask = call @llvm.riscv.vmsle.nxv16i8( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsle.mask.nxv16i8( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -264,9 +266,9 @@ entry: declare @llvm.riscv.vmsle.nxv32i8( , , - i64); + iXLen); -define @intrinsic_vmsle_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsle_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -276,7 +278,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv32i8( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -286,9 +288,9 @@ declare @llvm.riscv.vmsle.mask.nxv32i8( , , , - i64); + iXLen); -define @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -302,13 +304,13 @@ entry: %mask = call @llvm.riscv.vmsle.nxv32i8( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsle.mask.nxv32i8( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -316,9 +318,9 @@ entry: declare @llvm.riscv.vmsle.nxv1i16( , , - i64); + iXLen); -define @intrinsic_vmsle_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsle_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -328,7 +330,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv1i16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -338,9 +340,9 @@ declare @llvm.riscv.vmsle.mask.nxv1i16( , , , - i64); + iXLen); -define @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ 
-354,13 +356,13 @@ entry: %mask = call @llvm.riscv.vmsle.nxv1i16( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsle.mask.nxv1i16( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -368,9 +370,9 @@ entry: declare @llvm.riscv.vmsle.nxv2i16( , , - i64); + iXLen); -define @intrinsic_vmsle_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsle_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -380,7 +382,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv2i16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -390,9 +392,9 @@ declare @llvm.riscv.vmsle.mask.nxv2i16( , , , - i64); + iXLen); -define @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -406,13 +408,13 @@ entry: %mask = call @llvm.riscv.vmsle.nxv2i16( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsle.mask.nxv2i16( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -420,9 +422,9 @@ entry: declare @llvm.riscv.vmsle.nxv4i16( , , - i64); + iXLen); -define @intrinsic_vmsle_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsle_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -432,7 +434,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv4i16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vmsle.mask.nxv4i16( , , , - i64); + iXLen); -define @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -458,13 +460,13 @@ entry: %mask = call @llvm.riscv.vmsle.nxv4i16( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsle.mask.nxv4i16( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -472,9 +474,9 @@ entry: declare @llvm.riscv.vmsle.nxv8i16( , , - i64); + iXLen); -define @intrinsic_vmsle_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsle_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -484,7 +486,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv8i16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -494,9 +496,9 @@ declare @llvm.riscv.vmsle.mask.nxv8i16( , , , - i64); + iXLen); -define @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -510,13 +512,13 @@ entry: %mask = call @llvm.riscv.vmsle.nxv8i16( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsle.mask.nxv8i16( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -524,9 +526,9 @@ entry: declare @llvm.riscv.vmsle.nxv16i16( , , - i64); + iXLen); -define @intrinsic_vmsle_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsle_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsle_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -536,7 +538,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv16i16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -546,9 +548,9 @@ declare @llvm.riscv.vmsle.mask.nxv16i16( , , , - i64); + iXLen); -define @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -562,13 +564,13 @@ entry: %mask = call @llvm.riscv.vmsle.nxv16i16( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsle.mask.nxv16i16( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -576,9 +578,9 @@ entry: declare @llvm.riscv.vmsle.nxv1i32( , , - i64); + iXLen); -define @intrinsic_vmsle_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsle_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -588,7 +590,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv1i32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -598,9 +600,9 @@ declare @llvm.riscv.vmsle.mask.nxv1i32( , , , - i64); + iXLen); -define @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -614,13 +616,13 @@ entry: %mask = call @llvm.riscv.vmsle.nxv1i32( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsle.mask.nxv1i32( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -628,9 +630,9 @@ entry: declare @llvm.riscv.vmsle.nxv2i32( , , - i64); + iXLen); -define @intrinsic_vmsle_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsle_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -640,7 +642,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv2i32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -650,9 +652,9 @@ declare @llvm.riscv.vmsle.mask.nxv2i32( , , , - i64); + iXLen); -define @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -666,13 +668,13 @@ entry: %mask = call @llvm.riscv.vmsle.nxv2i32( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsle.mask.nxv2i32( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -680,9 +682,9 @@ entry: declare @llvm.riscv.vmsle.nxv4i32( , , - i64); + iXLen); -define @intrinsic_vmsle_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsle_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -692,7 +694,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv4i32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -702,9 +704,9 @@ declare @llvm.riscv.vmsle.mask.nxv4i32( , , , - i64); + iXLen); -define @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +define 
@intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -718,13 +720,13 @@ entry: %mask = call @llvm.riscv.vmsle.nxv4i32( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsle.mask.nxv4i32( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -732,9 +734,9 @@ entry: declare @llvm.riscv.vmsle.nxv8i32( , , - i64); + iXLen); -define @intrinsic_vmsle_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsle_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -744,7 +746,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv8i32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -754,9 +756,9 @@ declare @llvm.riscv.vmsle.mask.nxv8i32( , , , - i64); + iXLen); -define @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -770,13 +772,13 @@ entry: %mask = call @llvm.riscv.vmsle.nxv8i32( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsle.mask.nxv8i32( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -784,9 +786,9 @@ entry: declare @llvm.riscv.vmsle.nxv1i64( , , - i64); + iXLen); -define @intrinsic_vmsle_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsle_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -796,7 +798,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv1i64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vmsle.mask.nxv1i64( , , , - i64); + iXLen); -define @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -822,13 +824,13 @@ entry: %mask = call @llvm.riscv.vmsle.nxv1i64( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsle.mask.nxv1i64( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -836,9 +838,9 @@ entry: declare @llvm.riscv.vmsle.nxv2i64( , , - i64); + iXLen); -define @intrinsic_vmsle_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsle_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -848,7 +850,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv2i64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -858,9 +860,9 @@ declare @llvm.riscv.vmsle.mask.nxv2i64( , , , - i64); + iXLen); -define @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -874,13 +876,13 @@ entry: %mask = call @llvm.riscv.vmsle.nxv2i64( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsle.mask.nxv2i64( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -888,9 +890,9 @@ entry: declare 
@llvm.riscv.vmsle.nxv4i64( , , - i64); + iXLen); -define @intrinsic_vmsle_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsle_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -900,7 +902,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv4i64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -910,9 +912,9 @@ declare @llvm.riscv.vmsle.mask.nxv4i64( , , , - i64); + iXLen); -define @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -926,13 +928,13 @@ entry: %mask = call @llvm.riscv.vmsle.nxv4i64( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsle.mask.nxv4i64( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -940,9 +942,9 @@ entry: declare @llvm.riscv.vmsle.nxv1i8.i8( , i8, - i64); + iXLen); -define @intrinsic_vmsle_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmsle_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -952,7 +954,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv1i8.i8( %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -962,9 +964,9 @@ declare @llvm.riscv.vmsle.mask.nxv1i8.i8( , i8, , - i64); + iXLen); -define @intrinsic_vmsle_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -979,7 +981,7 @@ entry: %1, i8 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -987,9 +989,9 @@ entry: declare @llvm.riscv.vmsle.nxv2i8.i8( , i8, - i64); + iXLen); -define @intrinsic_vmsle_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmsle_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -999,7 +1001,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv2i8.i8( %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1009,9 +1011,9 @@ declare @llvm.riscv.vmsle.mask.nxv2i8.i8( , i8, , - i64); + iXLen); -define @intrinsic_vmsle_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1026,7 +1028,7 @@ entry: %1, i8 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1034,9 +1036,9 @@ entry: declare @llvm.riscv.vmsle.nxv4i8.i8( , i8, - i64); + iXLen); -define @intrinsic_vmsle_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmsle_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1046,7 +1048,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv4i8.i8( %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1056,9 +1058,9 @@ declare @llvm.riscv.vmsle.mask.nxv4i8.i8( , i8, , - i64); + iXLen); -define @intrinsic_vmsle_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8: ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1073,7 +1075,7 @@ entry: %1, i8 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1081,9 +1083,9 @@ entry: declare @llvm.riscv.vmsle.nxv8i8.i8( , i8, - i64); + iXLen); -define @intrinsic_vmsle_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmsle_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -1093,7 +1095,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv8i8.i8( %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1103,9 +1105,9 @@ declare @llvm.riscv.vmsle.mask.nxv8i8.i8( , i8, , - i64); + iXLen); -define @intrinsic_vmsle_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1120,7 +1122,7 @@ entry: %1, i8 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1128,9 +1130,9 @@ entry: declare @llvm.riscv.vmsle.nxv16i8.i8( , i8, - i64); + iXLen); -define @intrinsic_vmsle_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmsle_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1140,7 +1142,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv16i8.i8( %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1150,9 +1152,9 @@ declare @llvm.riscv.vmsle.mask.nxv16i8.i8( , i8, , - i64); + iXLen); -define @intrinsic_vmsle_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1167,7 +1169,7 @@ entry: %1, i8 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1175,9 +1177,9 @@ entry: declare @llvm.riscv.vmsle.nxv32i8.i8( , i8, - i64); + iXLen); -define @intrinsic_vmsle_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmsle_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -1187,7 +1189,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv32i8.i8( %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1197,9 +1199,9 @@ declare @llvm.riscv.vmsle.mask.nxv32i8.i8( , i8, , - i64); + iXLen); -define @intrinsic_vmsle_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1214,7 +1216,7 @@ entry: %1, i8 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vmsle.nxv1i16.i16( , i16, - i64); + iXLen); -define @intrinsic_vmsle_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmsle_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv1i16.i16( %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1244,9 +1246,9 @@ declare @llvm.riscv.vmsle.mask.nxv1i16.i16( , i16, , - i64); + iXLen); -define @intrinsic_vmsle_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vx_nxv1i16_i16( %0, %1, i16 %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1261,7 +1263,7 @@ entry: %1, i16 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1269,9 +1271,9 @@ entry: declare @llvm.riscv.vmsle.nxv2i16.i16( , i16, - i64); + iXLen); -define @intrinsic_vmsle_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmsle_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -1281,7 +1283,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv2i16.i16( %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1291,9 +1293,9 @@ declare @llvm.riscv.vmsle.mask.nxv2i16.i16( , i16, , - i64); + iXLen); -define @intrinsic_vmsle_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1308,7 +1310,7 @@ entry: %1, i16 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1316,9 +1318,9 @@ entry: declare @llvm.riscv.vmsle.nxv4i16.i16( , i16, - i64); + iXLen); -define @intrinsic_vmsle_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmsle_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1328,7 +1330,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv4i16.i16( %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1338,9 +1340,9 @@ declare @llvm.riscv.vmsle.mask.nxv4i16.i16( , i16, , - i64); + iXLen); -define @intrinsic_vmsle_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1355,7 +1357,7 @@ entry: %1, i16 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1363,9 +1365,9 @@ entry: declare @llvm.riscv.vmsle.nxv8i16.i16( , i16, - i64); + iXLen); -define @intrinsic_vmsle_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmsle_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1375,7 +1377,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv8i16.i16( %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1385,9 +1387,9 @@ declare @llvm.riscv.vmsle.mask.nxv8i16.i16( , i16, , - i64); + iXLen); -define @intrinsic_vmsle_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1402,7 +1404,7 @@ entry: %1, i16 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1410,9 +1412,9 @@ entry: declare @llvm.riscv.vmsle.nxv16i16.i16( , i16, - i64); + iXLen); -define @intrinsic_vmsle_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmsle_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -1422,7 +1424,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv16i16.i16( %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1432,9 +1434,9 @@ declare @llvm.riscv.vmsle.mask.nxv16i16.i16( , i16, , - i64); + 
iXLen); -define @intrinsic_vmsle_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1449,7 +1451,7 @@ entry: %1, i16 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1457,9 +1459,9 @@ entry: declare @llvm.riscv.vmsle.nxv1i32.i32( , i32, - i64); + iXLen); -define @intrinsic_vmsle_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmsle_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1469,7 +1471,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv1i32.i32( %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1479,9 +1481,9 @@ declare @llvm.riscv.vmsle.mask.nxv1i32.i32( , i32, , - i64); + iXLen); -define @intrinsic_vmsle_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1496,7 +1498,7 @@ entry: %1, i32 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1504,9 +1506,9 @@ entry: declare @llvm.riscv.vmsle.nxv2i32.i32( , i32, - i64); + iXLen); -define @intrinsic_vmsle_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmsle_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1516,7 +1518,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv2i32.i32( %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1526,9 +1528,9 @@ declare @llvm.riscv.vmsle.mask.nxv2i32.i32( , i32, , - i64); + iXLen); -define @intrinsic_vmsle_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1543,7 +1545,7 @@ entry: %1, i32 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1551,9 +1553,9 @@ entry: declare @llvm.riscv.vmsle.nxv4i32.i32( , i32, - i64); + iXLen); -define @intrinsic_vmsle_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmsle_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1563,7 +1565,7 @@ entry: %a = call @llvm.riscv.vmsle.nxv4i32.i32( %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1573,9 +1575,9 @@ declare @llvm.riscv.vmsle.mask.nxv4i32.i32( , i32, , - i64); + iXLen); -define @intrinsic_vmsle_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1590,7 +1592,7 @@ entry: %1, i32 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1598,9 +1600,9 @@ entry: declare @llvm.riscv.vmsle.nxv8i32.i32( , i32, - i64); + iXLen); -define @intrinsic_vmsle_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmsle_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1610,7 +1612,7 @@ entry: %a = call 
@llvm.riscv.vmsle.nxv8i32.i32( %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1620,9 +1622,9 @@ declare @llvm.riscv.vmsle.mask.nxv8i32.i32( , i32, , - i64); + iXLen); -define @intrinsic_vmsle_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsle_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1637,7 +1639,7 @@ entry: %1, i32 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1645,19 +1647,31 @@ entry: declare @llvm.riscv.vmsle.nxv1i64.i64( , i64, - i64); - -define @intrinsic_vmsle_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vmsle.vx v0, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsle_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsle_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vmsle.vv v0, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsle_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsle.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsle.nxv1i64.i64( %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -1667,24 +1681,39 @@ declare @llvm.riscv.vmsle.mask.nxv1i64.i64( , i64, , - i64); - -define @intrinsic_vmsle_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsle_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v11, (a0), zero +; RV32-NEXT: vmv1r.v v10, v0 +; RV32-NEXT: vmv1r.v v0, v9 +; RV32-NEXT: vmsle.vv v10, v8, v11, v0.t +; RV32-NEXT: vmv.v.v v0, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v10, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vmv1r.v v0, v9 +; RV64-NEXT: vmsle.vx v10, v8, a0, v0.t +; RV64-NEXT: vmv.v.v v0, v10 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsle.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1692,19 +1721,31 @@ entry: declare @llvm.riscv.vmsle.nxv2i64.i64( , i64, - i64); - -define @intrinsic_vmsle_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vmsle.vx v0, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsle_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsle_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) 
+; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vmsle.vv v0, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsle_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmsle.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsle.nxv2i64.i64( %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -1714,24 +1755,39 @@ declare @llvm.riscv.vmsle.mask.nxv2i64.i64( , i64, , - i64); - -define @intrinsic_vmsle_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsle.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsle_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmv1r.v v11, v0 +; RV32-NEXT: vmv1r.v v0, v10 +; RV32-NEXT: vmsle.vv v11, v8, v12, v0.t +; RV32-NEXT: vmv1r.v v0, v11 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v11, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vmv1r.v v0, v10 +; RV64-NEXT: vmsle.vx v11, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v11 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsle.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1739,19 +1795,31 @@ entry: declare @llvm.riscv.vmsle.nxv4i64.i64( , i64, - i64); - -define @intrinsic_vmsle_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vmsle.vx v0, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsle_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsle_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmsle.vv v0, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsle_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsle.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsle.nxv4i64.i64( %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -1761,29 +1829,44 @@ declare @llvm.riscv.vmsle.mask.nxv4i64.i64( , i64, , - i64); - -define @intrinsic_vmsle_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsle.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsle_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; 
RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmv1r.v v13, v0 +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: vmsle.vv v13, v8, v16, v0.t +; RV32-NEXT: vmv1r.v v0, v13 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v13, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: vmsle.vx v13, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v13 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsle.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i64 %4) + iXLen %4) ret %a } -define @intrinsic_vmsle_vi_nxv1i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vmsle_vi_nxv1i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -1793,12 +1876,12 @@ entry: %a = call @llvm.riscv.vmsle.nxv1i8.i8( %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsle_mask_vi_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsle_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1813,12 +1896,12 @@ entry: %1, i8 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsle_vi_nxv2i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vmsle_vi_nxv2i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -1828,12 +1911,12 @@ entry: %a = call @llvm.riscv.vmsle.nxv2i8.i8( %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsle_mask_vi_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsle_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1848,12 +1931,12 @@ entry: %1, i8 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsle_vi_nxv4i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vmsle_vi_nxv4i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -1863,12 +1946,12 @@ entry: %a = call @llvm.riscv.vmsle.nxv4i8.i8( %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsle_mask_vi_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsle_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1883,12 +1966,12 @@ entry: %1, i8 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsle_vi_nxv8i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vmsle_vi_nxv8i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -1898,12 +1981,12 @@ entry: %a = call @llvm.riscv.vmsle.nxv8i8.i8( %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsle_mask_vi_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsle_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1918,12 +2001,12 @@ entry: %1, i8 9, %2, - i64 %3) + iXLen %3) ret %a } -define 
@intrinsic_vmsle_vi_nxv16i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vmsle_vi_nxv16i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -1933,12 +2016,12 @@ entry: %a = call @llvm.riscv.vmsle.nxv16i8.i8( %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsle_mask_vi_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsle_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1953,12 +2036,12 @@ entry: %1, i8 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsle_vi_nxv32i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vmsle_vi_nxv32i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -1968,12 +2051,12 @@ entry: %a = call @llvm.riscv.vmsle.nxv32i8.i8( %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsle_mask_vi_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsle_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1988,12 +2071,12 @@ entry: %1, i8 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsle_vi_nxv1i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vmsle_vi_nxv1i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -2003,12 +2086,12 @@ entry: %a = call @llvm.riscv.vmsle.nxv1i16.i16( %0, i16 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsle_mask_vi_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsle_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2023,12 +2106,12 @@ entry: %1, i16 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsle_vi_nxv2i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vmsle_vi_nxv2i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -2038,12 +2121,12 @@ entry: %a = call @llvm.riscv.vmsle.nxv2i16.i16( %0, i16 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsle_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsle_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2058,12 +2141,12 @@ entry: %1, i16 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsle_vi_nxv4i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vmsle_vi_nxv4i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -2073,12 +2156,12 @@ entry: %a = call @llvm.riscv.vmsle.nxv4i16.i16( %0, i16 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsle_mask_vi_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsle_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2093,12 +2176,12 @@ entry: %1, i16 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsle_vi_nxv8i16_i16( 
%0, i64 %1) nounwind { +define @intrinsic_vmsle_vi_nxv8i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -2108,12 +2191,12 @@ entry: %a = call @llvm.riscv.vmsle.nxv8i16.i16( %0, i16 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsle_mask_vi_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsle_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2128,12 +2211,12 @@ entry: %1, i16 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsle_vi_nxv16i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vmsle_vi_nxv16i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -2143,12 +2226,12 @@ entry: %a = call @llvm.riscv.vmsle.nxv16i16.i16( %0, i16 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsle_mask_vi_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsle_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2163,12 +2246,12 @@ entry: %1, i16 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsle_vi_nxv1i32_i32( %0, i64 %1) nounwind { +define @intrinsic_vmsle_vi_nxv1i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -2178,12 +2261,12 @@ entry: %a = call @llvm.riscv.vmsle.nxv1i32.i32( %0, i32 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsle_mask_vi_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsle_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2198,12 +2281,12 @@ entry: %1, i32 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsle_vi_nxv2i32_i32( %0, i64 %1) nounwind { +define @intrinsic_vmsle_vi_nxv2i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -2213,12 +2296,12 @@ entry: %a = call @llvm.riscv.vmsle.nxv2i32.i32( %0, i32 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsle_mask_vi_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsle_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2233,12 +2316,12 @@ entry: %1, i32 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsle_vi_nxv4i32_i32( %0, i64 %1) nounwind { +define @intrinsic_vmsle_vi_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -2248,12 +2331,12 @@ entry: %a = call @llvm.riscv.vmsle.nxv4i32.i32( %0, i32 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsle_mask_vi_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsle_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2268,12 +2351,12 @@ entry: %1, i32 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsle_vi_nxv8i32_i32( %0, 
i64 %1) nounwind { +define @intrinsic_vmsle_vi_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -2283,12 +2366,12 @@ entry: %a = call @llvm.riscv.vmsle.nxv8i32.i32( %0, i32 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsle_mask_vi_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsle_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2303,12 +2386,12 @@ entry: %1, i32 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsle_vi_nxv1i64_i64( %0, i64 %1) nounwind { +define @intrinsic_vmsle_vi_nxv1i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -2318,12 +2401,12 @@ entry: %a = call @llvm.riscv.vmsle.nxv1i64.i64( %0, i64 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsle_mask_vi_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsle_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2338,12 +2421,12 @@ entry: %1, i64 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsle_vi_nxv2i64_i64( %0, i64 %1) nounwind { +define @intrinsic_vmsle_vi_nxv2i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -2353,12 +2436,12 @@ entry: %a = call @llvm.riscv.vmsle.nxv2i64.i64( %0, i64 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsle_mask_vi_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsle_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2373,12 +2456,12 @@ entry: %1, i64 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsle_vi_nxv4i64_i64( %0, i64 %1) nounwind { +define @intrinsic_vmsle_vi_nxv4i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -2388,12 +2471,12 @@ entry: %a = call @llvm.riscv.vmsle.nxv4i64.i64( %0, i64 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsle_mask_vi_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsle_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2408,7 +2491,7 @@ entry: %1, i64 9, %2, - i64 %3) + iXLen %3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll deleted file mode 100644 index f933c99..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll +++ /dev/null @@ -1,2414 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmsleu.nxv1i8( - , - , - i64); - -define @intrinsic_vmsleu_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv1i8( - %0, - %1, - 
i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv1i8( - , - , - , - , - i64); - -define @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmsleu.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsleu.nxv1i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsleu.mask.nxv1i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv2i8( - , - , - i64); - -define @intrinsic_vmsleu_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv2i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv2i8( - , - , - , - , - i64); - -define @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmsleu.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsleu.nxv2i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsleu.mask.nxv2i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv4i8( - , - , - i64); - -define @intrinsic_vmsleu_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv4i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv4i8( - , - , - , - , - i64); - -define @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmsleu.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsleu.nxv4i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsleu.mask.nxv4i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv8i8( - , - , - i64); - -define @intrinsic_vmsleu_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv8i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv8i8( - , - , - , - , - i64); - -define @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmsleu.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsleu.vv v11, v9, v10, 
v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsleu.nxv8i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsleu.mask.nxv8i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv16i8( - , - , - i64); - -define @intrinsic_vmsleu_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv16i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv16i8( - , - , - , - , - i64); - -define @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmsleu.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsleu.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsleu.nxv16i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsleu.mask.nxv16i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv32i8( - , - , - i64); - -define @intrinsic_vmsleu_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vv_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv32i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv32i8( - , - , - , - , - i64); - -define @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmsleu.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsleu.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsleu.nxv32i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsleu.mask.nxv32i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv1i16( - , - , - i64); - -define @intrinsic_vmsleu_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv1i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv1i16( - , - , - , - , - i64); - -define @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmsleu.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsleu.nxv1i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsleu.mask.nxv1i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv2i16( - , - , - i64); - -define @intrinsic_vmsleu_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vmsleu_vv_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv2i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv2i16( - , - , - , - , - i64); - -define @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmsleu.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsleu.nxv2i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsleu.mask.nxv2i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv4i16( - , - , - i64); - -define @intrinsic_vmsleu_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv4i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv4i16( - , - , - , - , - i64); - -define @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmsleu.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsleu.nxv4i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsleu.mask.nxv4i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv8i16( - , - , - i64); - -define @intrinsic_vmsleu_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv8i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv8i16( - , - , - , - , - i64); - -define @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmsleu.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsleu.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsleu.nxv8i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsleu.mask.nxv8i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv16i16( - , - , - i64); - -define @intrinsic_vmsleu_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv16i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv16i16( - , - , - , - , - i64); - -define @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, 
i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmsleu.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsleu.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsleu.nxv16i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsleu.mask.nxv16i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv1i32( - , - , - i64); - -define @intrinsic_vmsleu_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv1i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv1i32( - , - , - , - , - i64); - -define @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmsleu.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsleu.nxv1i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsleu.mask.nxv1i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv2i32( - , - , - i64); - -define @intrinsic_vmsleu_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv2i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv2i32( - , - , - , - , - i64); - -define @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmsleu.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsleu.nxv2i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsleu.mask.nxv2i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv4i32( - , - , - i64); - -define @intrinsic_vmsleu_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv4i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv4i32( - , - , - , - , - i64); - -define @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmsleu.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsleu.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsleu.nxv4i32( 
- %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsleu.mask.nxv4i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv8i32( - , - , - i64); - -define @intrinsic_vmsleu_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv8i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv8i32( - , - , - , - , - i64); - -define @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmsleu.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsleu.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsleu.nxv8i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsleu.mask.nxv8i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv1i64( - , - , - i64); - -define @intrinsic_vmsleu_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv1i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv1i64( - , - , - , - , - i64); - -define @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmsleu.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsleu.nxv1i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsleu.mask.nxv1i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv2i64( - , - , - i64); - -define @intrinsic_vmsleu_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmsleu.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv2i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv2i64( - , - , - , - , - i64); - -define @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmsleu.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsleu.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsleu.nxv2i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsleu.mask.nxv2i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv4i64( - , - , - i64); - -define @intrinsic_vmsleu_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: 
vmsleu.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv4i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv4i64( - , - , - , - , - i64); - -define @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmsleu.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsleu.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsleu.nxv4i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsleu.mask.nxv4i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv1i8.i8( - , - i8, - i64); - -define @intrinsic_vmsleu_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vmsleu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv1i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv1i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsleu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv2i8.i8( - , - i8, - i64); - -define @intrinsic_vmsleu_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vmsleu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv2i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv2i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsleu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv4i8.i8( - , - i8, - i64); - -define @intrinsic_vmsleu_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vmsleu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv4i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv4i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsleu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vmsleu.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv8i8.i8( - , - i8, - i64); - -define @intrinsic_vmsleu_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vmsleu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv8i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv8i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsleu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv16i8.i8( - , - i8, - i64); - -define @intrinsic_vmsleu_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vmsleu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv16i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv16i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsleu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsleu.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv32i8.i8( - , - i8, - i64); - -define @intrinsic_vmsleu_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vmsleu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv32i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv32i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsleu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsleu.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv1i16.i16( - , - i16, - i64); - -define @intrinsic_vmsleu_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vmsleu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv1i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv1i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsleu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vmsleu_mask_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv2i16.i16( - , - i16, - i64); - -define @intrinsic_vmsleu_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vmsleu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv2i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv2i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsleu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv4i16.i16( - , - i16, - i64); - -define @intrinsic_vmsleu_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vmsleu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv4i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv4i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsleu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv8i16.i16( - , - i16, - i64); - -define @intrinsic_vmsleu_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vmsleu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv8i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv8i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsleu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsleu.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv16i16.i16( - , - i16, - i64); - -define @intrinsic_vmsleu_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; 
CHECK-NEXT: vmsleu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv16i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv16i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsleu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsleu.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv1i32.i32( - , - i32, - i64); - -define @intrinsic_vmsleu_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vmsleu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv1i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv1i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsleu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv2i32.i32( - , - i32, - i64); - -define @intrinsic_vmsleu_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vmsleu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv2i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv2i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsleu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv4i32.i32( - , - i32, - i64); - -define @intrinsic_vmsleu_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vmsleu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv4i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv4i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsleu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsleu.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vmsleu.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv8i32.i32( - , - i32, - i64); - -define @intrinsic_vmsleu_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vmsleu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv8i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv8i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsleu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsleu.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv1i64.i64( - , - i64, - i64); - -define @intrinsic_vmsleu_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vmsleu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv1i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv1i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmsleu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv2i64.i64( - , - i64, - i64); - -define @intrinsic_vmsleu_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vmsleu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv2i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv2i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmsleu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsleu.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsleu.nxv4i64.i64( - , - i64, - i64); - -define @intrinsic_vmsleu_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vmsleu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv4i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsleu.mask.nxv4i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmsleu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) 
nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsleu.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -define @intrinsic_vmsleu_vi_nxv1i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv1i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsleu_mask_vi_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv1i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsleu_vi_nxv2i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv2i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsleu_mask_vi_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv2i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsleu_vi_nxv4i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv4i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsleu_mask_vi_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv4i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsleu_vi_nxv8i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv8i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsleu_mask_vi_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv8i8.i8( - %0, - %1, - i8 
9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsleu_vi_nxv16i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vi_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv16i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsleu_mask_vi_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsleu.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv16i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsleu_vi_nxv32i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vi_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv32i8.i8( - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsleu_mask_vi_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsleu.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv32i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsleu_vi_nxv1i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv1i16.i16( - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsleu_mask_vi_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv1i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsleu_vi_nxv2i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv2i16.i16( - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsleu_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv2i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsleu_vi_nxv4i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vmsleu.nxv4i16.i16( - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsleu_mask_vi_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv4i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsleu_vi_nxv8i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv8i16.i16( - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsleu_mask_vi_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsleu.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv8i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsleu_vi_nxv16i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vi_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv16i16.i16( - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsleu_mask_vi_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsleu.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv16i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsleu_vi_nxv1i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv1i32.i32( - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsleu_mask_vi_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv1i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsleu_vi_nxv2i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv2i32.i32( - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsleu_mask_vi_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, 
m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv2i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsleu_vi_nxv4i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv4i32.i32( - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsleu_mask_vi_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsleu.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv4i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsleu_vi_nxv8i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv8i32.i32( - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsleu_mask_vi_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsleu.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv8i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsleu_vi_nxv1i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv1i64.i64( - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsleu_mask_vi_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv1i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsleu_vi_nxv2i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv2i64.i64( - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsleu_mask_vi_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsleu.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv2i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsleu_vi_nxv4i64_i64( %0, i64 %1) nounwind { -; 
CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.nxv4i64.i64( - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsleu_mask_vi_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsleu.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsleu.mask.nxv4i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll similarity index 85% rename from llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmsleu.ll index 6580b5a..cc17def 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vmsleu.nxv1i8( , , - i32); + iXLen); -define @intrinsic_vmsleu_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv1i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -26,9 +28,9 @@ declare @llvm.riscv.vmsleu.mask.nxv1i8( , , , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -42,13 +44,13 @@ entry: %mask = call @llvm.riscv.vmsleu.nxv1i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsleu.mask.nxv1i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -56,9 +58,9 @@ entry: declare @llvm.riscv.vmsleu.nxv2i8( , , - i32); + iXLen); -define @intrinsic_vmsleu_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -68,7 +70,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv2i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -78,9 +80,9 @@ declare @llvm.riscv.vmsleu.mask.nxv2i8( , , , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -94,13 +96,13 @@ entry: %mask = call @llvm.riscv.vmsleu.nxv2i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsleu.mask.nxv2i8( %0, %2, %3, %mask, - i32 %4) + 
iXLen %4) ret %a } @@ -108,9 +110,9 @@ entry: declare @llvm.riscv.vmsleu.nxv4i8( , , - i32); + iXLen); -define @intrinsic_vmsleu_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -120,7 +122,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv4i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -130,9 +132,9 @@ declare @llvm.riscv.vmsleu.mask.nxv4i8( , , , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -146,13 +148,13 @@ entry: %mask = call @llvm.riscv.vmsleu.nxv4i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsleu.mask.nxv4i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -160,9 +162,9 @@ entry: declare @llvm.riscv.vmsleu.nxv8i8( , , - i32); + iXLen); -define @intrinsic_vmsleu_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -172,7 +174,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv8i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -182,9 +184,9 @@ declare @llvm.riscv.vmsleu.mask.nxv8i8( , , , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -198,13 +200,13 @@ entry: %mask = call @llvm.riscv.vmsleu.nxv8i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsleu.mask.nxv8i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -212,9 +214,9 @@ entry: declare @llvm.riscv.vmsleu.nxv16i8( , , - i32); + iXLen); -define @intrinsic_vmsleu_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -224,7 +226,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv16i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -234,9 +236,9 @@ declare @llvm.riscv.vmsleu.mask.nxv16i8( , , , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -250,13 +252,13 @@ entry: %mask = call @llvm.riscv.vmsleu.nxv16i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsleu.mask.nxv16i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -264,9 +266,9 @@ entry: declare @llvm.riscv.vmsleu.nxv32i8( , , - i32); + iXLen); -define @intrinsic_vmsleu_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -276,7 +278,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv32i8( %0, 
%1, - i32 %2) + iXLen %2) ret %a } @@ -286,9 +288,9 @@ declare @llvm.riscv.vmsleu.mask.nxv32i8( , , , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -302,13 +304,13 @@ entry: %mask = call @llvm.riscv.vmsleu.nxv32i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsleu.mask.nxv32i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -316,9 +318,9 @@ entry: declare @llvm.riscv.vmsleu.nxv1i16( , , - i32); + iXLen); -define @intrinsic_vmsleu_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -328,7 +330,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv1i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -338,9 +340,9 @@ declare @llvm.riscv.vmsleu.mask.nxv1i16( , , , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -354,13 +356,13 @@ entry: %mask = call @llvm.riscv.vmsleu.nxv1i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsleu.mask.nxv1i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -368,9 +370,9 @@ entry: declare @llvm.riscv.vmsleu.nxv2i16( , , - i32); + iXLen); -define @intrinsic_vmsleu_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -380,7 +382,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv2i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -390,9 +392,9 @@ declare @llvm.riscv.vmsleu.mask.nxv2i16( , , , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -406,13 +408,13 @@ entry: %mask = call @llvm.riscv.vmsleu.nxv2i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsleu.mask.nxv2i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -420,9 +422,9 @@ entry: declare @llvm.riscv.vmsleu.nxv4i16( , , - i32); + iXLen); -define @intrinsic_vmsleu_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -432,7 +434,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv4i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vmsleu.mask.nxv4i16( , , , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a0, e16, m1, ta, mu @@ -458,13 +460,13 @@ entry: %mask = call @llvm.riscv.vmsleu.nxv4i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsleu.mask.nxv4i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -472,9 +474,9 @@ entry: declare @llvm.riscv.vmsleu.nxv8i16( , , - i32); + iXLen); -define @intrinsic_vmsleu_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -484,7 +486,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv8i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -494,9 +496,9 @@ declare @llvm.riscv.vmsleu.mask.nxv8i16( , , , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -510,13 +512,13 @@ entry: %mask = call @llvm.riscv.vmsleu.nxv8i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsleu.mask.nxv8i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -524,9 +526,9 @@ entry: declare @llvm.riscv.vmsleu.nxv16i16( , , - i32); + iXLen); -define @intrinsic_vmsleu_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -536,7 +538,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv16i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -546,9 +548,9 @@ declare @llvm.riscv.vmsleu.mask.nxv16i16( , , , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -562,13 +564,13 @@ entry: %mask = call @llvm.riscv.vmsleu.nxv16i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsleu.mask.nxv16i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -576,9 +578,9 @@ entry: declare @llvm.riscv.vmsleu.nxv1i32( , , - i32); + iXLen); -define @intrinsic_vmsleu_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -588,7 +590,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv1i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -598,9 +600,9 @@ declare @llvm.riscv.vmsleu.mask.nxv1i32( , , , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -614,13 +616,13 @@ entry: %mask = call @llvm.riscv.vmsleu.nxv1i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsleu.mask.nxv1i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -628,9 +630,9 @@ entry: declare @llvm.riscv.vmsleu.nxv2i32( , , - i32); + iXLen); -define @intrinsic_vmsleu_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +define 
@intrinsic_vmsleu_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -640,7 +642,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv2i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -650,9 +652,9 @@ declare @llvm.riscv.vmsleu.mask.nxv2i32( , , , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -666,13 +668,13 @@ entry: %mask = call @llvm.riscv.vmsleu.nxv2i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsleu.mask.nxv2i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -680,9 +682,9 @@ entry: declare @llvm.riscv.vmsleu.nxv4i32( , , - i32); + iXLen); -define @intrinsic_vmsleu_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -692,7 +694,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv4i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -702,9 +704,9 @@ declare @llvm.riscv.vmsleu.mask.nxv4i32( , , , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -718,13 +720,13 @@ entry: %mask = call @llvm.riscv.vmsleu.nxv4i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsleu.mask.nxv4i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -732,9 +734,9 @@ entry: declare @llvm.riscv.vmsleu.nxv8i32( , , - i32); + iXLen); -define @intrinsic_vmsleu_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -744,7 +746,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv8i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -754,9 +756,9 @@ declare @llvm.riscv.vmsleu.mask.nxv8i32( , , , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -770,13 +772,13 @@ entry: %mask = call @llvm.riscv.vmsleu.nxv8i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsleu.mask.nxv8i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -784,9 +786,9 @@ entry: declare @llvm.riscv.vmsleu.nxv1i64( , , - i32); + iXLen); -define @intrinsic_vmsleu_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -796,7 +798,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv1i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vmsleu.mask.nxv1i64( , , , - i32); + iXLen); -define 
@intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -822,13 +824,13 @@ entry: %mask = call @llvm.riscv.vmsleu.nxv1i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsleu.mask.nxv1i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -836,9 +838,9 @@ entry: declare @llvm.riscv.vmsleu.nxv2i64( , , - i32); + iXLen); -define @intrinsic_vmsleu_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -848,7 +850,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv2i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -858,9 +860,9 @@ declare @llvm.riscv.vmsleu.mask.nxv2i64( , , , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -874,13 +876,13 @@ entry: %mask = call @llvm.riscv.vmsleu.nxv2i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsleu.mask.nxv2i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -888,9 +890,9 @@ entry: declare @llvm.riscv.vmsleu.nxv4i64( , , - i32); + iXLen); -define @intrinsic_vmsleu_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -900,7 +902,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv4i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -910,9 +912,9 @@ declare @llvm.riscv.vmsleu.mask.nxv4i64( , , , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -926,13 +928,13 @@ entry: %mask = call @llvm.riscv.vmsleu.nxv4i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsleu.mask.nxv4i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -940,9 +942,9 @@ entry: declare @llvm.riscv.vmsleu.nxv1i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsleu_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -952,7 +954,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv1i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -962,9 +964,9 @@ declare @llvm.riscv.vmsleu.mask.nxv1i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -979,7 +981,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -987,9 +989,9 @@ entry: declare @llvm.riscv.vmsleu.nxv2i8.i8( , 
i8, - i32); + iXLen); -define @intrinsic_vmsleu_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -999,7 +1001,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv2i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1009,9 +1011,9 @@ declare @llvm.riscv.vmsleu.mask.nxv2i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1026,7 +1028,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1034,9 +1036,9 @@ entry: declare @llvm.riscv.vmsleu.nxv4i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsleu_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1046,7 +1048,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv4i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1056,9 +1058,9 @@ declare @llvm.riscv.vmsleu.mask.nxv4i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1073,7 +1075,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1081,9 +1083,9 @@ entry: declare @llvm.riscv.vmsleu.nxv8i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsleu_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -1093,7 +1095,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv8i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1103,9 +1105,9 @@ declare @llvm.riscv.vmsleu.mask.nxv8i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1120,7 +1122,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1128,9 +1130,9 @@ entry: declare @llvm.riscv.vmsleu.nxv16i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsleu_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1140,7 +1142,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv16i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1150,9 +1152,9 @@ declare @llvm.riscv.vmsleu.mask.nxv16i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1167,7 +1169,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen 
%4) ret %a } @@ -1175,9 +1177,9 @@ entry: declare @llvm.riscv.vmsleu.nxv32i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsleu_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -1187,7 +1189,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv32i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1197,9 +1199,9 @@ declare @llvm.riscv.vmsleu.mask.nxv32i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1214,7 +1216,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vmsleu.nxv1i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsleu_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv1i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1244,9 +1246,9 @@ declare @llvm.riscv.vmsleu.mask.nxv1i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1261,7 +1263,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1269,9 +1271,9 @@ entry: declare @llvm.riscv.vmsleu.nxv2i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsleu_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -1281,7 +1283,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv2i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1291,9 +1293,9 @@ declare @llvm.riscv.vmsleu.mask.nxv2i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1308,7 +1310,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1316,9 +1318,9 @@ entry: declare @llvm.riscv.vmsleu.nxv4i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsleu_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1328,7 +1330,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv4i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1338,9 +1340,9 @@ declare @llvm.riscv.vmsleu.mask.nxv4i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1355,7 +1357,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1363,9 +1365,9 @@ entry: declare @llvm.riscv.vmsleu.nxv8i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsleu_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1375,7 +1377,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv8i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1385,9 +1387,9 @@ declare @llvm.riscv.vmsleu.mask.nxv8i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1402,7 +1404,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1410,9 +1412,9 @@ entry: declare @llvm.riscv.vmsleu.nxv16i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsleu_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -1422,7 +1424,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv16i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1432,9 +1434,9 @@ declare @llvm.riscv.vmsleu.mask.nxv16i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1449,7 +1451,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1457,9 +1459,9 @@ entry: declare @llvm.riscv.vmsleu.nxv1i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsleu_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1469,7 +1471,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv1i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1479,9 +1481,9 @@ declare @llvm.riscv.vmsleu.mask.nxv1i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1496,7 +1498,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1504,9 +1506,9 @@ entry: declare @llvm.riscv.vmsleu.nxv2i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsleu_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1516,7 +1518,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv2i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1526,9 +1528,9 @@ declare @llvm.riscv.vmsleu.mask.nxv2i32.i32( , i32, , - 
i32); + iXLen); -define @intrinsic_vmsleu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1543,7 +1545,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1551,9 +1553,9 @@ entry: declare @llvm.riscv.vmsleu.nxv4i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsleu_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1563,7 +1565,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv4i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1573,9 +1575,9 @@ declare @llvm.riscv.vmsleu.mask.nxv4i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1590,7 +1592,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1598,9 +1600,9 @@ entry: declare @llvm.riscv.vmsleu.nxv8i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsleu_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsleu_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1610,7 +1612,7 @@ entry: %a = call @llvm.riscv.vmsleu.nxv8i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1620,9 +1622,9 @@ declare @llvm.riscv.vmsleu.mask.nxv8i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsleu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsleu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1637,7 +1639,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1645,25 +1647,31 @@ entry: declare @llvm.riscv.vmsleu.nxv1i64.i64( , i64, - i32); - -define @intrinsic_vmsleu_vx_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vmsleu.vv v0, v8, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsleu_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsleu_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vmsleu.vv v0, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsleu_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsleu.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.nxv1i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1673,30 +1681,39 @@ declare 
@llvm.riscv.vmsleu.mask.nxv1i64.i64( , i64, , - i32); - -define @intrinsic_vmsleu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v11, (a0), zero -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vv v10, v8, v11, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsleu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v11, (a0), zero +; RV32-NEXT: vmv1r.v v10, v0 +; RV32-NEXT: vmv1r.v v0, v9 +; RV32-NEXT: vmsleu.vv v10, v8, v11, v0.t +; RV32-NEXT: vmv.v.v v0, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v10, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vmv1r.v v0, v9 +; RV64-NEXT: vmsleu.vx v10, v8, a0, v0.t +; RV64-NEXT: vmv.v.v v0, v10 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1704,25 +1721,31 @@ entry: declare @llvm.riscv.vmsleu.nxv2i64.i64( , i64, - i32); - -define @intrinsic_vmsleu_vx_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vmsleu.vv v0, v8, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsleu_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsleu_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vmsleu.vv v0, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsleu_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmsleu.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.nxv2i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1732,30 +1755,39 @@ declare @llvm.riscv.vmsleu.mask.nxv2i64.i64( , i64, , - i32); - -define @intrinsic_vmsleu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsleu.vv v11, v8, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsleu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen 
%4) nounwind { +; RV32-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmv1r.v v11, v0 +; RV32-NEXT: vmv1r.v v0, v10 +; RV32-NEXT: vmsleu.vv v11, v8, v12, v0.t +; RV32-NEXT: vmv1r.v v0, v11 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v11, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vmv1r.v v0, v10 +; RV64-NEXT: vmsleu.vx v11, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v11 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1763,25 +1795,31 @@ entry: declare @llvm.riscv.vmsleu.nxv4i64.i64( , i64, - i32); - -define @intrinsic_vmsleu_vx_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmsleu.vv v0, v8, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsleu_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsleu_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmsleu.vv v0, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsleu_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsleu.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.nxv4i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1791,35 +1829,44 @@ declare @llvm.riscv.vmsleu.mask.nxv4i64.i64( , i64, , - i32); - -define @intrinsic_vmsleu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsleu.vv v13, v8, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsleu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmv1r.v v13, v0 +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: vmsleu.vv v13, v8, v16, v0.t +; RV32-NEXT: vmv1r.v v0, v13 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v13, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: vmsleu.vx v13, v8, 
a0, v0.t +; RV64-NEXT: vmv1r.v v0, v13 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } -define @intrinsic_vmsleu_vi_nxv1i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsleu_vi_nxv1i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -1829,12 +1876,12 @@ entry: %a = call @llvm.riscv.vmsleu.nxv1i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsleu_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsleu_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1849,12 +1896,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsleu_vi_nxv2i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsleu_vi_nxv2i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -1864,12 +1911,12 @@ entry: %a = call @llvm.riscv.vmsleu.nxv2i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsleu_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsleu_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1884,12 +1931,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsleu_vi_nxv4i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsleu_vi_nxv4i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -1899,12 +1946,12 @@ entry: %a = call @llvm.riscv.vmsleu.nxv4i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsleu_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsleu_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1919,12 +1966,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsleu_vi_nxv8i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsleu_vi_nxv8i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -1934,12 +1981,12 @@ entry: %a = call @llvm.riscv.vmsleu.nxv8i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsleu_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsleu_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1954,12 +2001,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsleu_vi_nxv16i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsleu_vi_nxv16i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -1969,12 +2016,12 @@ entry: %a = call @llvm.riscv.vmsleu.nxv16i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsleu_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsleu_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1989,12 +2036,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsleu_vi_nxv32i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsleu_vi_nxv32i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -2004,12 +2051,12 @@ entry: %a = call @llvm.riscv.vmsleu.nxv32i8.i8( %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsleu_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsleu_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2024,12 +2071,12 @@ entry: %1, i8 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsleu_vi_nxv1i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsleu_vi_nxv1i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -2039,12 +2086,12 @@ entry: %a = call @llvm.riscv.vmsleu.nxv1i16.i16( %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsleu_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsleu_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2059,12 +2106,12 @@ entry: %1, i16 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsleu_vi_nxv2i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsleu_vi_nxv2i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -2074,12 +2121,12 @@ entry: %a = call @llvm.riscv.vmsleu.nxv2i16.i16( %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsleu_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsleu_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2094,12 +2141,12 @@ entry: %1, i16 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsleu_vi_nxv4i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsleu_vi_nxv4i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -2109,12 +2156,12 @@ entry: %a = call @llvm.riscv.vmsleu.nxv4i16.i16( %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsleu_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsleu_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2129,12 +2176,12 @@ entry: %1, i16 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsleu_vi_nxv8i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsleu_vi_nxv8i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -2144,12 +2191,12 @@ entry: %a = call @llvm.riscv.vmsleu.nxv8i16.i16( %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsleu_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsleu_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i16_i16: ; CHECK: 
# %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2164,12 +2211,12 @@ entry: %1, i16 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsleu_vi_nxv16i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsleu_vi_nxv16i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -2179,12 +2226,12 @@ entry: %a = call @llvm.riscv.vmsleu.nxv16i16.i16( %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsleu_mask_vi_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsleu_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2199,12 +2246,12 @@ entry: %1, i16 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsleu_vi_nxv1i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsleu_vi_nxv1i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -2214,12 +2261,12 @@ entry: %a = call @llvm.riscv.vmsleu.nxv1i32.i32( %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsleu_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsleu_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2234,12 +2281,12 @@ entry: %1, i32 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsleu_vi_nxv2i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsleu_vi_nxv2i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -2249,12 +2296,12 @@ entry: %a = call @llvm.riscv.vmsleu.nxv2i32.i32( %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsleu_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsleu_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2269,12 +2316,12 @@ entry: %1, i32 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsleu_vi_nxv4i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsleu_vi_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -2284,12 +2331,12 @@ entry: %a = call @llvm.riscv.vmsleu.nxv4i32.i32( %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsleu_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsleu_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2304,12 +2351,12 @@ entry: %1, i32 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsleu_vi_nxv8i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsleu_vi_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -2319,12 +2366,12 @@ entry: %a = call @llvm.riscv.vmsleu.nxv8i32.i32( %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsleu_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsleu_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmsleu_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2339,12 +2386,12 @@ entry: %1, i32 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsleu_vi_nxv1i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmsleu_vi_nxv1i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -2354,12 +2401,12 @@ entry: %a = call @llvm.riscv.vmsleu.nxv1i64.i64( %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsleu_mask_vi_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsleu_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2374,12 +2421,12 @@ entry: %1, i64 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsleu_vi_nxv2i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmsleu_vi_nxv2i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -2389,12 +2436,12 @@ entry: %a = call @llvm.riscv.vmsleu.nxv2i64.i64( %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsleu_mask_vi_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsleu_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2409,12 +2456,12 @@ entry: %1, i64 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsleu_vi_nxv4i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmsleu_vi_nxv4i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -2424,12 +2471,12 @@ entry: %a = call @llvm.riscv.vmsleu.nxv4i64.i64( %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsleu_mask_vi_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsleu_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2444,7 +2491,7 @@ entry: %1, i64 9, %2, - i32 %3) + iXLen %3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll deleted file mode 100644 index 14a1340..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll +++ /dev/null @@ -1,2414 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmslt.nxv1i8( - , - , - i64); - -define @intrinsic_vmslt_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv1i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv1i8( - , - , - , - , - i64); - -define @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmslt.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; 
CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmslt.nxv1i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmslt.mask.nxv1i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv2i8( - , - , - i64); - -define @intrinsic_vmslt_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv2i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv2i8( - , - , - , - , - i64); - -define @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmslt.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmslt.nxv2i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmslt.mask.nxv2i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv4i8( - , - , - i64); - -define @intrinsic_vmslt_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv4i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv4i8( - , - , - , - , - i64); - -define @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmslt.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmslt.nxv4i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmslt.mask.nxv4i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv8i8( - , - , - i64); - -define @intrinsic_vmslt_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv8i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv8i8( - , - , - , - , - i64); - -define @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmslt.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmslt.nxv8i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmslt.mask.nxv8i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv16i8( - , - , - i64); - -define @intrinsic_vmslt_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmslt.vv v0, v8, v10 -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv16i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv16i8( - , - , - , - , - i64); - -define @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmslt.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmslt.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmslt.nxv16i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmslt.mask.nxv16i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv32i8( - , - , - i64); - -define @intrinsic_vmslt_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vv_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmslt.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv32i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv32i8( - , - , - , - , - i64); - -define @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmslt.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmslt.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmslt.nxv32i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmslt.mask.nxv32i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv1i16( - , - , - i64); - -define @intrinsic_vmslt_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv1i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv1i16( - , - , - , - , - i64); - -define @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmslt.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmslt.nxv1i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmslt.mask.nxv1i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv2i16( - , - , - i64); - -define @intrinsic_vmslt_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv2i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv2i16( - , - , - , - , - i64); - -define @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmslt.vv v8, v8, v9 -; CHECK-NEXT: 
vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmslt.nxv2i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmslt.mask.nxv2i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv4i16( - , - , - i64); - -define @intrinsic_vmslt_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv4i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv4i16( - , - , - , - , - i64); - -define @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmslt.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmslt.nxv4i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmslt.mask.nxv4i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv8i16( - , - , - i64); - -define @intrinsic_vmslt_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmslt.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv8i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv8i16( - , - , - , - , - i64); - -define @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmslt.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmslt.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmslt.nxv8i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmslt.mask.nxv8i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv16i16( - , - , - i64); - -define @intrinsic_vmslt_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmslt.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv16i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv16i16( - , - , - , - , - i64); - -define @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmslt.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmslt.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmslt.nxv16i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmslt.mask.nxv16i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv1i32( - , - , - i64); - -define @intrinsic_vmslt_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) 
nounwind { -; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv1i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv1i32( - , - , - , - , - i64); - -define @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmslt.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmslt.nxv1i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmslt.mask.nxv1i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv2i32( - , - , - i64); - -define @intrinsic_vmslt_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv2i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv2i32( - , - , - , - , - i64); - -define @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmslt.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmslt.nxv2i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmslt.mask.nxv2i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv4i32( - , - , - i64); - -define @intrinsic_vmslt_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmslt.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv4i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv4i32( - , - , - , - , - i64); - -define @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmslt.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmslt.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmslt.nxv4i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmslt.mask.nxv4i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv8i32( - , - , - i64); - -define @intrinsic_vmslt_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmslt.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv8i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv8i32( - , - , - , - , - i64); - -define @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; 
CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmslt.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmslt.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmslt.nxv8i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmslt.mask.nxv8i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv1i64( - , - , - i64); - -define @intrinsic_vmslt_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv1i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv1i64( - , - , - , - , - i64); - -define @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmslt.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmslt.nxv1i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmslt.mask.nxv1i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv2i64( - , - , - i64); - -define @intrinsic_vmslt_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmslt.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv2i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv2i64( - , - , - , - , - i64); - -define @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmslt.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmslt.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmslt.nxv2i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmslt.mask.nxv2i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv4i64( - , - , - i64); - -define @intrinsic_vmslt_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmslt.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv4i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv4i64( - , - , - , - , - i64); - -define @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmslt.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmslt.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmslt.nxv4i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmslt.mask.nxv4i64( 
- %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv1i8.i8( - , - i8, - i64); - -define @intrinsic_vmslt_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vmslt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv1i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv1i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmslt_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv2i8.i8( - , - i8, - i64); - -define @intrinsic_vmslt_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vmslt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv2i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv2i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmslt_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv4i8.i8( - , - i8, - i64); - -define @intrinsic_vmslt_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vmslt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv4i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv4i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmslt_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv8i8.i8( - , - i8, - i64); - -define @intrinsic_vmslt_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vmslt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv8i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv8i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmslt_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, 
ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv16i8.i8( - , - i8, - i64); - -define @intrinsic_vmslt_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vmslt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv16i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv16i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmslt_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv32i8.i8( - , - i8, - i64); - -define @intrinsic_vmslt_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vmslt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv32i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv32i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmslt_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv1i16.i16( - , - i16, - i64); - -define @intrinsic_vmslt_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vmslt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv1i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv1i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmslt_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv2i16.i16( - , - i16, - i64); - -define @intrinsic_vmslt_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vmslt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv2i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv2i16.i16( - , - , - i16, - , - 
i64); - -define @intrinsic_vmslt_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv4i16.i16( - , - i16, - i64); - -define @intrinsic_vmslt_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vmslt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv4i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv4i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmslt_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv8i16.i16( - , - i16, - i64); - -define @intrinsic_vmslt_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vmslt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv8i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv8i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmslt_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv16i16.i16( - , - i16, - i64); - -define @intrinsic_vmslt_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vmslt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv16i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv16i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmslt_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv1i32.i32( - , - i32, - i64); - -define @intrinsic_vmslt_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i32_i32: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vmslt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv1i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv1i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmslt_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv2i32.i32( - , - i32, - i64); - -define @intrinsic_vmslt_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vmslt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv2i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv2i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmslt_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv4i32.i32( - , - i32, - i64); - -define @intrinsic_vmslt_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vmslt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv4i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv4i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmslt_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv8i32.i32( - , - i32, - i64); - -define @intrinsic_vmslt_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vmslt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv8i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv8i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmslt_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = 
call @llvm.riscv.vmslt.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv1i64.i64( - , - i64, - i64); - -define @intrinsic_vmslt_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vmslt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv1i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv1i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmslt_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv2i64.i64( - , - i64, - i64); - -define @intrinsic_vmslt_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vmslt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv2i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv2i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmslt_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmslt.nxv4i64.i64( - , - i64, - i64); - -define @intrinsic_vmslt_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vmslt.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv4i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmslt.mask.nxv4i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmslt_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -define @intrinsic_vmslt_vi_nxv1i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, -16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv1i8.i8( - %0, - i8 -15, - i64 %1) - - ret %a -} - -define @intrinsic_vmslt_mask_vi_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: 
vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vi v10, v8, -15, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv1i8.i8( - %0, - %1, - i8 -14, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmslt_vi_nxv2i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, -14 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv2i8.i8( - %0, - i8 -13, - i64 %1) - - ret %a -} - -define @intrinsic_vmslt_mask_vi_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vi v10, v8, -13, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv2i8.i8( - %0, - %1, - i8 -12, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmslt_vi_nxv4i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, -12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv4i8.i8( - %0, - i8 -11, - i64 %1) - - ret %a -} - -define @intrinsic_vmslt_mask_vi_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vi v10, v8, -11, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv4i8.i8( - %0, - %1, - i8 -10, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmslt_vi_nxv8i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, -10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv8i8.i8( - %0, - i8 -9, - i64 %1) - - ret %a -} - -define @intrinsic_vmslt_mask_vi_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vi v10, v8, -9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv8i8.i8( - %0, - %1, - i8 -8, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmslt_vi_nxv16i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, -8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv16i8.i8( - %0, - i8 -7, - i64 %1) - - ret %a -} - -define @intrinsic_vmslt_mask_vi_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsle.vi v11, v8, -7, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv16i8.i8( - %0, - %1, - i8 -6, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmslt_vi_nxv32i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vi_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, -6 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv32i8.i8( - %0, - i8 -5, - i64 %1) - - ret %a -} - -define @intrinsic_vmslt_mask_vi_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsle.vi v13, v8, -5, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv32i8.i8( - %0, - %1, - i8 -4, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmslt_vi_nxv1i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, -4 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv1i16.i16( - %0, - i16 -3, - i64 %1) - - ret %a -} - -define @intrinsic_vmslt_mask_vi_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vi v10, v8, -3, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv1i16.i16( - %0, - %1, - i16 -2, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmslt_vi_nxv2i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, -2 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv2i16.i16( - %0, - i16 -1, - i64 %1) - - ret %a -} - -define @intrinsic_vmslt_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vx v10, v8, zero, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv2i16.i16( - %0, - %1, - i16 0, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmslt_vi_nxv4i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmslt.vx v0, v8, zero -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv4i16.i16( - %0, - i16 0, - i64 %1) - - ret %a -} - -define @intrinsic_vmslt_mask_vi_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vi v10, v8, 0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv4i16.i16( - %0, - %1, - i16 1, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmslt_vi_nxv8i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 1 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv8i16.i16( - %0, - i16 2, - i64 %1) - - ret %a -} - -define @intrinsic_vmslt_mask_vi_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsle.vi v11, v8, 2, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv8i16.i16( - %0, - %1, - i16 3, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmslt_vi_nxv16i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 3 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv16i16.i16( - %0, - i16 4, - i64 %1) - - ret %a -} - -define @intrinsic_vmslt_mask_vi_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsle.vi v13, v8, 4, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv16i16.i16( - %0, - %1, - i16 5, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmslt_vi_nxv1i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 5 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv1i32.i32( - %0, - i32 6, - i64 %1) - - ret %a -} - -define @intrinsic_vmslt_mask_vi_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vi v10, v8, 6, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv1i32.i32( - %0, - %1, - i32 7, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmslt_vi_nxv2i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 7 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv2i32.i32( - %0, - i32 8, - i64 %1) - - ret %a -} - -define @intrinsic_vmslt_mask_vi_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vi v10, v8, 8, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv2i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmslt_vi_nxv4i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv4i32.i32( - %0, - i32 10, - i64 %1) - - ret %a -} - -define @intrinsic_vmslt_mask_vi_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsle.vi v11, v8, 10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv4i32.i32( - %0, - %1, - i32 11, - %2, - i64 %3) - - ret %a -} - -define 
@intrinsic_vmslt_vi_nxv8i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv8i32.i32( - %0, - i32 12, - i64 %1) - - ret %a -} - -define @intrinsic_vmslt_mask_vi_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsle.vi v13, v8, 12, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv8i32.i32( - %0, - %1, - i32 13, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmslt_vi_nxv1i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv1i64.i64( - %0, - i64 14, - i64 %1) - - ret %a -} - -define @intrinsic_vmslt_mask_vi_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsle.vi v10, v8, 14, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv1i64.i64( - %0, - %1, - i64 15, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmslt_vi_nxv2i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, 15 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv2i64.i64( - %0, - i64 16, - i64 %1) - - ret %a -} - -define @intrinsic_vmslt_mask_vi_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsle.vi v11, v8, -16, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv2i64.i64( - %0, - %1, - i64 -15, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmslt_vi_nxv4i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, -15 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.nxv4i64.i64( - %0, - i64 -14, - i64 %1) - - ret %a -} - -define @intrinsic_vmslt_mask_vi_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsle.vi v13, v8, -14, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmslt.mask.nxv4i64.i64( - %0, - %1, - i64 -13, - %2, - i64 %3) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt.ll similarity index 84% rename from llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmslt.ll index 9be3ee4..ab56392 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vmslt.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -26,9 +28,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -42,13 +44,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i8> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -56,9 +58,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -68,7 +70,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -78,9 +80,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -94,13 +96,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i8> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -108,9 +110,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -120,7 +122,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -130,9 +132,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -146,13 +148,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i8> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -160,9 +162,9 @@ entry:
declare @llvm.riscv.vmslt.nxv8i8( , , - i32); + iXLen); -define @intrinsic_vmslt_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmslt_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -172,7 +174,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv8i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -182,9 +184,9 @@ declare @llvm.riscv.vmslt.mask.nxv8i8( , , , - i32); + iXLen); -define @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -198,13 +200,13 @@ entry: %mask = call @llvm.riscv.vmslt.nxv8i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmslt.mask.nxv8i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -212,9 +214,9 @@ entry: declare @llvm.riscv.vmslt.nxv16i8( , , - i32); + iXLen); -define @intrinsic_vmslt_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmslt_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -224,7 +226,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv16i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -234,9 +236,9 @@ declare @llvm.riscv.vmslt.mask.nxv16i8( , , , - i32); + iXLen); -define @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -250,13 +252,13 @@ entry: %mask = call @llvm.riscv.vmslt.nxv16i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmslt.mask.nxv16i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -264,9 +266,9 @@ entry: declare @llvm.riscv.vmslt.nxv32i8( , , - i32); + iXLen); -define @intrinsic_vmslt_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmslt_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -276,7 +278,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv32i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -286,9 +288,9 @@ declare @llvm.riscv.vmslt.mask.nxv32i8( , , , - i32); + iXLen); -define @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -302,13 +304,13 @@ entry: %mask = call @llvm.riscv.vmslt.nxv32i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmslt.mask.nxv32i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -316,9 +318,9 @@ entry: declare @llvm.riscv.vmslt.nxv1i16( , , - i32); + iXLen); -define @intrinsic_vmslt_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmslt_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -328,7 +330,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv1i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -338,9 +340,9 @@ declare 
@llvm.riscv.vmslt.mask.nxv1i16( , , , - i32); + iXLen); -define @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -354,13 +356,13 @@ entry: %mask = call @llvm.riscv.vmslt.nxv1i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmslt.mask.nxv1i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -368,9 +370,9 @@ entry: declare @llvm.riscv.vmslt.nxv2i16( , , - i32); + iXLen); -define @intrinsic_vmslt_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmslt_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -380,7 +382,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv2i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -390,9 +392,9 @@ declare @llvm.riscv.vmslt.mask.nxv2i16( , , , - i32); + iXLen); -define @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -406,13 +408,13 @@ entry: %mask = call @llvm.riscv.vmslt.nxv2i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmslt.mask.nxv2i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -420,9 +422,9 @@ entry: declare @llvm.riscv.vmslt.nxv4i16( , , - i32); + iXLen); -define @intrinsic_vmslt_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmslt_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -432,7 +434,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv4i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vmslt.mask.nxv4i16( , , , - i32); + iXLen); -define @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -458,13 +460,13 @@ entry: %mask = call @llvm.riscv.vmslt.nxv4i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmslt.mask.nxv4i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -472,9 +474,9 @@ entry: declare @llvm.riscv.vmslt.nxv8i16( , , - i32); + iXLen); -define @intrinsic_vmslt_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmslt_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -484,7 +486,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv8i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -494,9 +496,9 @@ declare @llvm.riscv.vmslt.mask.nxv8i16( , , , - i32); + iXLen); -define @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -510,13 +512,13 @@ entry: %mask = call @llvm.riscv.vmslt.nxv8i16( %1, %2, - 
i32 %4) + iXLen %4) %a = call @llvm.riscv.vmslt.mask.nxv8i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -524,9 +526,9 @@ entry: declare @llvm.riscv.vmslt.nxv16i16( , , - i32); + iXLen); -define @intrinsic_vmslt_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmslt_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -536,7 +538,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv16i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -546,9 +548,9 @@ declare @llvm.riscv.vmslt.mask.nxv16i16( , , , - i32); + iXLen); -define @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -562,13 +564,13 @@ entry: %mask = call @llvm.riscv.vmslt.nxv16i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmslt.mask.nxv16i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -576,9 +578,9 @@ entry: declare @llvm.riscv.vmslt.nxv1i32( , , - i32); + iXLen); -define @intrinsic_vmslt_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmslt_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -588,7 +590,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv1i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -598,9 +600,9 @@ declare @llvm.riscv.vmslt.mask.nxv1i32( , , , - i32); + iXLen); -define @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -614,13 +616,13 @@ entry: %mask = call @llvm.riscv.vmslt.nxv1i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmslt.mask.nxv1i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -628,9 +630,9 @@ entry: declare @llvm.riscv.vmslt.nxv2i32( , , - i32); + iXLen); -define @intrinsic_vmslt_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmslt_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -640,7 +642,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv2i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -650,9 +652,9 @@ declare @llvm.riscv.vmslt.mask.nxv2i32( , , , - i32); + iXLen); -define @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -666,13 +668,13 @@ entry: %mask = call @llvm.riscv.vmslt.nxv2i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmslt.mask.nxv2i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -680,9 +682,9 @@ entry: declare @llvm.riscv.vmslt.nxv4i32( , , - i32); + iXLen); -define @intrinsic_vmslt_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmslt_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -692,7 +694,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv4i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -702,9 +704,9 @@ declare @llvm.riscv.vmslt.mask.nxv4i32( , , , - i32); + iXLen); -define @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -718,13 +720,13 @@ entry: %mask = call @llvm.riscv.vmslt.nxv4i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmslt.mask.nxv4i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -732,9 +734,9 @@ entry: declare @llvm.riscv.vmslt.nxv8i32( , , - i32); + iXLen); -define @intrinsic_vmslt_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmslt_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -744,7 +746,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv8i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -754,9 +756,9 @@ declare @llvm.riscv.vmslt.mask.nxv8i32( , , , - i32); + iXLen); -define @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -770,13 +772,13 @@ entry: %mask = call @llvm.riscv.vmslt.nxv8i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmslt.mask.nxv8i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -784,9 +786,9 @@ entry: declare @llvm.riscv.vmslt.nxv1i64( , , - i32); + iXLen); -define @intrinsic_vmslt_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmslt_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -796,7 +798,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv1i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vmslt.mask.nxv1i64( , , , - i32); + iXLen); -define @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -822,13 +824,13 @@ entry: %mask = call @llvm.riscv.vmslt.nxv1i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmslt.mask.nxv1i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -836,9 +838,9 @@ entry: declare @llvm.riscv.vmslt.nxv2i64( , , - i32); + iXLen); -define @intrinsic_vmslt_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmslt_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -848,7 +850,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv2i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -858,9 +860,9 @@ declare @llvm.riscv.vmslt.mask.nxv2i64( , , , - i32); + iXLen); -define @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -874,13 +876,13 @@ entry: %mask = call @llvm.riscv.vmslt.nxv2i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmslt.mask.nxv2i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -888,9 +890,9 @@ entry: declare @llvm.riscv.vmslt.nxv4i64( , , - i32); + iXLen); -define @intrinsic_vmslt_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmslt_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -900,7 +902,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv4i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -910,9 +912,9 @@ declare @llvm.riscv.vmslt.mask.nxv4i64( , , , - i32); + iXLen); -define @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -926,13 +928,13 @@ entry: %mask = call @llvm.riscv.vmslt.nxv4i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmslt.mask.nxv4i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -940,9 +942,9 @@ entry: declare @llvm.riscv.vmslt.nxv1i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmslt_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmslt_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -952,7 +954,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv1i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -962,9 +964,9 @@ declare @llvm.riscv.vmslt.mask.nxv1i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmslt_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -979,7 +981,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -987,9 +989,9 @@ entry: declare @llvm.riscv.vmslt.nxv2i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmslt_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmslt_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -999,7 +1001,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv2i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1009,9 +1011,9 @@ declare @llvm.riscv.vmslt.mask.nxv2i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmslt_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1026,7 +1028,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1034,9 +1036,9 @@ entry: declare @llvm.riscv.vmslt.nxv4i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmslt_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmslt_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1046,7 +1048,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv4i8.i8( %0, i8 
%1, - i32 %2) + iXLen %2) ret %a } @@ -1056,9 +1058,9 @@ declare @llvm.riscv.vmslt.mask.nxv4i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmslt_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1073,7 +1075,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1081,9 +1083,9 @@ entry: declare @llvm.riscv.vmslt.nxv8i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmslt_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmslt_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -1093,7 +1095,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv8i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1103,9 +1105,9 @@ declare @llvm.riscv.vmslt.mask.nxv8i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmslt_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1120,7 +1122,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1128,9 +1130,9 @@ entry: declare @llvm.riscv.vmslt.nxv16i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmslt_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmslt_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1140,7 +1142,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv16i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1150,9 +1152,9 @@ declare @llvm.riscv.vmslt.mask.nxv16i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmslt_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1167,7 +1169,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1175,9 +1177,9 @@ entry: declare @llvm.riscv.vmslt.nxv32i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmslt_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmslt_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -1187,7 +1189,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv32i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1197,9 +1199,9 @@ declare @llvm.riscv.vmslt.mask.nxv32i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmslt_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1214,7 +1216,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vmslt.nxv1i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmslt_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmslt_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -1234,7 
+1236,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv1i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1244,9 +1246,9 @@ declare @llvm.riscv.vmslt.mask.nxv1i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmslt_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1261,7 +1263,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1269,9 +1271,9 @@ entry: declare @llvm.riscv.vmslt.nxv2i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmslt_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmslt_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -1281,7 +1283,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv2i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1291,9 +1293,9 @@ declare @llvm.riscv.vmslt.mask.nxv2i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmslt_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1308,7 +1310,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1316,9 +1318,9 @@ entry: declare @llvm.riscv.vmslt.nxv4i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmslt_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmslt_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1328,7 +1330,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv4i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1338,9 +1340,9 @@ declare @llvm.riscv.vmslt.mask.nxv4i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmslt_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1355,7 +1357,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1363,9 +1365,9 @@ entry: declare @llvm.riscv.vmslt.nxv8i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmslt_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmslt_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1375,7 +1377,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv8i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1385,9 +1387,9 @@ declare @llvm.riscv.vmslt.mask.nxv8i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmslt_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1402,7 +1404,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1410,9 +1412,9 @@ entry: declare @llvm.riscv.vmslt.nxv16i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmslt_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmslt_vx_nxv16i16_i16( %0, i16 %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -1422,7 +1424,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv16i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1432,9 +1434,9 @@ declare @llvm.riscv.vmslt.mask.nxv16i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmslt_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1449,7 +1451,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1457,9 +1459,9 @@ entry: declare @llvm.riscv.vmslt.nxv1i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmslt_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmslt_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1469,7 +1471,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv1i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1479,9 +1481,9 @@ declare @llvm.riscv.vmslt.mask.nxv1i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmslt_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1496,7 +1498,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1504,9 +1506,9 @@ entry: declare @llvm.riscv.vmslt.nxv2i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmslt_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmslt_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1516,7 +1518,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv2i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1526,9 +1528,9 @@ declare @llvm.riscv.vmslt.mask.nxv2i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmslt_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1543,7 +1545,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1551,9 +1553,9 @@ entry: declare @llvm.riscv.vmslt.nxv4i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmslt_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmslt_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1563,7 +1565,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv4i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1573,9 +1575,9 @@ declare @llvm.riscv.vmslt.mask.nxv4i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmslt_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1590,7 +1592,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1598,9 +1600,9 @@ entry: declare @llvm.riscv.vmslt.nxv8i32.i32( , i32, 
- i32); + iXLen); -define @intrinsic_vmslt_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmslt_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1610,7 +1612,7 @@ entry: %a = call @llvm.riscv.vmslt.nxv8i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1620,9 +1622,9 @@ declare @llvm.riscv.vmslt.mask.nxv8i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmslt_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmslt_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1637,7 +1639,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1645,25 +1647,31 @@ entry: declare @llvm.riscv.vmslt.nxv1i64.i64( , i64, - i32); - -define @intrinsic_vmslt_vx_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vmslt.vv v0, v8, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmslt_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmslt_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vmslt.vv v0, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmslt_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmslt.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmslt.nxv1i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1673,30 +1681,39 @@ declare @llvm.riscv.vmslt.mask.nxv1i64.i64( , i64, , - i32); - -define @intrinsic_vmslt_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v11, (a0), zero -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmslt.vv v10, v8, v11, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmslt_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v11, (a0), zero +; RV32-NEXT: vmv1r.v v10, v0 +; RV32-NEXT: vmv1r.v v0, v9 +; RV32-NEXT: vmslt.vv v10, v8, v11, v0.t +; RV32-NEXT: vmv.v.v v0, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v10, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vmv1r.v v0, v9 +; RV64-NEXT: vmslt.vx v10, v8, a0, v0.t +; RV64-NEXT: vmv.v.v v0, v10 +; 
RV64-NEXT: ret entry: %a = call @llvm.riscv.vmslt.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1704,25 +1721,31 @@ entry: declare @llvm.riscv.vmslt.nxv2i64.i64( , i64, - i32); - -define @intrinsic_vmslt_vx_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vmslt.vv v0, v8, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmslt_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmslt_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vmslt.vv v0, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmslt_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmslt.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmslt.nxv2i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1732,30 +1755,39 @@ declare @llvm.riscv.vmslt.mask.nxv2i64.i64( , i64, , - i32); - -define @intrinsic_vmslt_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmslt.vv v11, v8, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmslt_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmv1r.v v11, v0 +; RV32-NEXT: vmv1r.v v0, v10 +; RV32-NEXT: vmslt.vv v11, v8, v12, v0.t +; RV32-NEXT: vmv1r.v v0, v11 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v11, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vmv1r.v v0, v10 +; RV64-NEXT: vmslt.vx v11, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v11 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmslt.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1763,25 +1795,31 @@ entry: declare @llvm.riscv.vmslt.nxv4i64.i64( , i64, - i32); - -define @intrinsic_vmslt_vx_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmslt.vv v0, v8, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmslt_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) 
nounwind { +; RV32-LABEL: intrinsic_vmslt_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmslt.vv v0, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmslt_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmslt.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmslt.nxv4i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1791,35 +1829,44 @@ declare @llvm.riscv.vmslt.mask.nxv4i64.i64( , i64, , - i32); - -define @intrinsic_vmslt_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmslt.vv v13, v8, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmslt_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmv1r.v v13, v0 +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: vmslt.vv v13, v8, v16, v0.t +; RV32-NEXT: vmv1r.v v0, v13 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v13, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: vmslt.vx v13, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v13 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmslt.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } -define @intrinsic_vmslt_vi_nxv1i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmslt_vi_nxv1i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -1829,12 +1876,12 @@ entry: %a = call @llvm.riscv.vmslt.nxv1i8.i8( %0, i8 -15, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmslt_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmslt_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1849,12 +1896,12 @@ entry: %1, i8 -14, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmslt_vi_nxv2i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmslt_vi_nxv2i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -1864,12 +1911,12 @@ entry: %a = call @llvm.riscv.vmslt.nxv2i8.i8( %0, i8 -13, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmslt_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmslt_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1884,12 
+1931,12 @@ entry: %1, i8 -12, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmslt_vi_nxv4i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmslt_vi_nxv4i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -1899,12 +1946,12 @@ entry: %a = call @llvm.riscv.vmslt.nxv4i8.i8( %0, i8 -11, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmslt_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmslt_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1919,12 +1966,12 @@ entry: %1, i8 -10, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmslt_vi_nxv8i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmslt_vi_nxv8i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -1934,12 +1981,12 @@ entry: %a = call @llvm.riscv.vmslt.nxv8i8.i8( %0, i8 -9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmslt_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmslt_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1954,12 +2001,12 @@ entry: %1, i8 -8, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmslt_vi_nxv16i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmslt_vi_nxv16i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -1969,12 +2016,12 @@ entry: %a = call @llvm.riscv.vmslt.nxv16i8.i8( %0, i8 -7, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmslt_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmslt_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1989,12 +2036,12 @@ entry: %1, i8 -6, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmslt_vi_nxv32i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmslt_vi_nxv32i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -2004,12 +2051,12 @@ entry: %a = call @llvm.riscv.vmslt.nxv32i8.i8( %0, i8 -5, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmslt_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmslt_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2024,12 +2071,12 @@ entry: %1, i8 -4, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmslt_vi_nxv1i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmslt_vi_nxv1i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -2039,12 +2086,12 @@ entry: %a = call @llvm.riscv.vmslt.nxv1i16.i16( %0, i16 -3, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmslt_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmslt_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2059,12 +2106,12 @@ entry: %1, i16 -2, %2, - i32 %3) + iXLen 
%3) ret %a } -define @intrinsic_vmslt_vi_nxv2i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmslt_vi_nxv2i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -2074,12 +2121,12 @@ entry: %a = call @llvm.riscv.vmslt.nxv2i16.i16( %0, i16 -1, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmslt_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmslt_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2094,12 +2141,12 @@ entry: %1, i16 0, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmslt_vi_nxv4i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmslt_vi_nxv4i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -2109,12 +2156,12 @@ entry: %a = call @llvm.riscv.vmslt.nxv4i16.i16( %0, i16 0, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmslt_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmslt_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2129,12 +2176,12 @@ entry: %1, i16 1, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmslt_vi_nxv8i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmslt_vi_nxv8i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -2144,12 +2191,12 @@ entry: %a = call @llvm.riscv.vmslt.nxv8i16.i16( %0, i16 2, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmslt_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmslt_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2164,12 +2211,12 @@ entry: %1, i16 3, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmslt_vi_nxv16i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmslt_vi_nxv16i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -2179,12 +2226,12 @@ entry: %a = call @llvm.riscv.vmslt.nxv16i16.i16( %0, i16 4, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmslt_mask_vi_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmslt_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2199,12 +2246,12 @@ entry: %1, i16 5, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmslt_vi_nxv1i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmslt_vi_nxv1i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -2214,12 +2261,12 @@ entry: %a = call @llvm.riscv.vmslt.nxv1i32.i32( %0, i32 6, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmslt_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmslt_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2234,12 +2281,12 @@ entry: %1, i32 7, %2, - i32 %3) + iXLen %3) 
ret %a } -define @intrinsic_vmslt_vi_nxv2i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmslt_vi_nxv2i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -2249,12 +2296,12 @@ entry: %a = call @llvm.riscv.vmslt.nxv2i32.i32( %0, i32 8, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmslt_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmslt_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2269,12 +2316,12 @@ entry: %1, i32 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmslt_vi_nxv4i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmslt_vi_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -2284,12 +2331,12 @@ entry: %a = call @llvm.riscv.vmslt.nxv4i32.i32( %0, i32 10, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmslt_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmslt_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2304,12 +2351,12 @@ entry: %1, i32 11, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmslt_vi_nxv8i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmslt_vi_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -2319,12 +2366,12 @@ entry: %a = call @llvm.riscv.vmslt.nxv8i32.i32( %0, i32 12, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmslt_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmslt_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2339,12 +2386,12 @@ entry: %1, i32 13, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmslt_vi_nxv1i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmslt_vi_nxv1i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -2354,12 +2401,12 @@ entry: %a = call @llvm.riscv.vmslt.nxv1i64.i64( %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmslt_mask_vi_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmslt_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2374,12 +2421,12 @@ entry: %1, i64 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmslt_vi_nxv2i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmslt_vi_nxv2i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -2389,12 +2436,12 @@ entry: %a = call @llvm.riscv.vmslt.nxv2i64.i64( %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmslt_mask_vi_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmslt_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2409,12 +2456,12 @@ entry: %1, i64 9, %2, - i32 %3) + iXLen %3) ret %a } 
-define @intrinsic_vmslt_vi_nxv4i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmslt_vi_nxv4i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -2424,12 +2471,12 @@ entry: %a = call @llvm.riscv.vmslt.nxv4i64.i64( %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmslt_mask_vi_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmslt_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2444,7 +2491,7 @@ entry: %1, i64 9, %2, - i32 %3) + iXLen %3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll deleted file mode 100644 index 87e0f9a..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll +++ /dev/null @@ -1,2414 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmsltu.nxv1i8( - , - , - i64); - -define @intrinsic_vmsltu_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv1i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv1i8( - , - , - , - , - i64); - -define @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmsltu.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsltu.nxv1i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsltu.mask.nxv1i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv2i8( - , - , - i64); - -define @intrinsic_vmsltu_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv2i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv2i8( - , - , - , - , - i64); - -define @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmsltu.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsltu.nxv2i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsltu.mask.nxv2i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv4i8( - , - , - i64); - -define @intrinsic_vmsltu_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv4i8( - %0, - %1, - i64 
%2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv4i8( - , - , - , - , - i64); - -define @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmsltu.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsltu.nxv4i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsltu.mask.nxv4i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv8i8( - , - , - i64); - -define @intrinsic_vmsltu_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv8i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv8i8( - , - , - , - , - i64); - -define @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmsltu.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsltu.nxv8i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsltu.mask.nxv8i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv16i8( - , - , - i64); - -define @intrinsic_vmsltu_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv16i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv16i8( - , - , - , - , - i64); - -define @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmsltu.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsltu.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsltu.nxv16i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsltu.mask.nxv16i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv32i8( - , - , - i64); - -define @intrinsic_vmsltu_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vv_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv32i8( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv32i8( - , - , - , - , - i64); - -define @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmsltu.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: 
vmsltu.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsltu.nxv32i8( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsltu.mask.nxv32i8( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv1i16( - , - , - i64); - -define @intrinsic_vmsltu_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv1i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv1i16( - , - , - , - , - i64); - -define @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmsltu.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsltu.nxv1i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsltu.mask.nxv1i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv2i16( - , - , - i64); - -define @intrinsic_vmsltu_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv2i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv2i16( - , - , - , - , - i64); - -define @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmsltu.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsltu.nxv2i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsltu.mask.nxv2i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv4i16( - , - , - i64); - -define @intrinsic_vmsltu_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv4i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv4i16( - , - , - , - , - i64); - -define @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmsltu.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsltu.nxv4i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsltu.mask.nxv4i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv8i16( - , - , - i64); - -define @intrinsic_vmsltu_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vmsltu_vv_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv8i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv8i16( - , - , - , - , - i64); - -define @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmsltu.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsltu.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsltu.nxv8i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsltu.mask.nxv8i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv16i16( - , - , - i64); - -define @intrinsic_vmsltu_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv16i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv16i16( - , - , - , - , - i64); - -define @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmsltu.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsltu.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsltu.nxv16i16( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsltu.mask.nxv16i16( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv1i32( - , - , - i64); - -define @intrinsic_vmsltu_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv1i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv1i32( - , - , - , - , - i64); - -define @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmsltu.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsltu.nxv1i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsltu.mask.nxv1i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv2i32( - , - , - i64); - -define @intrinsic_vmsltu_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv2i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv2i32( - , - , - , - , - i64); - -define @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, 
%3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmsltu.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsltu.nxv2i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsltu.mask.nxv2i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv4i32( - , - , - i64); - -define @intrinsic_vmsltu_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv4i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv4i32( - , - , - , - , - i64); - -define @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmsltu.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsltu.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsltu.nxv4i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsltu.mask.nxv4i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv8i32( - , - , - i64); - -define @intrinsic_vmsltu_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv8i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv8i32( - , - , - , - , - i64); - -define @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmsltu.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsltu.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsltu.nxv8i32( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsltu.mask.nxv8i32( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv1i64( - , - , - i64); - -define @intrinsic_vmsltu_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv1i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv1i64( - , - , - , - , - i64); - -define @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmsltu.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsltu.nxv1i64( 
- %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsltu.mask.nxv1i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv2i64( - , - , - i64); - -define @intrinsic_vmsltu_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv2i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv2i64( - , - , - , - , - i64); - -define @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmsltu.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsltu.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsltu.nxv2i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsltu.mask.nxv2i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv4i64( - , - , - i64); - -define @intrinsic_vmsltu_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmsltu.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv4i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv4i64( - , - , - , - , - i64); - -define @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmsltu.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsltu.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsltu.nxv4i64( - %1, - %2, - i64 %4) - %a = call @llvm.riscv.vmsltu.mask.nxv4i64( - %0, - %2, - %3, - %mask, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv1i8.i8( - , - i8, - i64); - -define @intrinsic_vmsltu_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vmsltu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv1i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv1i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsltu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv2i8.i8( - , - i8, - i64); - -define @intrinsic_vmsltu_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vmsltu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv2i8.i8( - %0, - i8 %1, - i64 %2) - 
- ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv2i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsltu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv4i8.i8( - , - i8, - i64); - -define @intrinsic_vmsltu_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vmsltu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv4i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv4i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsltu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv8i8.i8( - , - i8, - i64); - -define @intrinsic_vmsltu_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vmsltu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv8i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv8i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsltu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv16i8.i8( - , - i8, - i64); - -define @intrinsic_vmsltu_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vmsltu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv16i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv16i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsltu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv32i8.i8( - , - i8, - i64); - -define @intrinsic_vmsltu_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vmsltu_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vmsltu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv32i8.i8( - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv32i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vmsltu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv1i16.i16( - , - i16, - i64); - -define @intrinsic_vmsltu_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vmsltu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv1i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv1i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsltu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv2i16.i16( - , - i16, - i64); - -define @intrinsic_vmsltu_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vmsltu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv2i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv2i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsltu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv4i16.i16( - , - i16, - i64); - -define @intrinsic_vmsltu_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vmsltu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv4i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv4i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsltu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; 
CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv8i16.i16( - , - i16, - i64); - -define @intrinsic_vmsltu_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vmsltu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv8i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv8i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsltu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv16i16.i16( - , - i16, - i64); - -define @intrinsic_vmsltu_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vmsltu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv16i16.i16( - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv16i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vmsltu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv1i32.i32( - , - i32, - i64); - -define @intrinsic_vmsltu_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vmsltu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv1i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv1i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsltu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv2i32.i32( - , - i32, - i64); - -define @intrinsic_vmsltu_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vmsltu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv2i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv2i32.i32( - , - , - i32, - , - i64); 
- -define @intrinsic_vmsltu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv4i32.i32( - , - i32, - i64); - -define @intrinsic_vmsltu_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vmsltu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv4i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv4i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsltu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv8i32.i32( - , - i32, - i64); - -define @intrinsic_vmsltu_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vmsltu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv8i32.i32( - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv8i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vmsltu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv1i64.i64( - , - i64, - i64); - -define @intrinsic_vmsltu_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vmsltu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv1i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv1i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmsltu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv2i64.i64( - , - i64, - i64); - -define @intrinsic_vmsltu_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vmsltu_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vmsltu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv2i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv2i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmsltu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -declare @llvm.riscv.vmsltu.nxv4i64.i64( - , - i64, - i64); - -define @intrinsic_vmsltu_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vmsltu.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv4i64.i64( - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmsltu.mask.nxv4i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vmsltu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4) - - ret %a -} - -define @intrinsic_vmsltu_vi_nxv1i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, -16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv1i8.i8( - %0, - i8 -15, - i64 %1) - - ret %a -} - -define @intrinsic_vmsltu_mask_vi_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vi v10, v8, -15, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv1i8.i8( - %0, - %1, - i8 -14, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsltu_vi_nxv2i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, -14 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv2i8.i8( - %0, - i8 -13, - i64 %1) - - ret %a -} - -define @intrinsic_vmsltu_mask_vi_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vi v10, v8, -13, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv2i8.i8( - %0, - %1, - i8 -12, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsltu_vi_nxv4i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, -12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv4i8.i8( - %0, - i8 -11, - i64 %1) - - ret %a -} - -define @intrinsic_vmsltu_mask_vi_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vi v10, v8, -11, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv4i8.i8( - %0, - %1, - i8 -10, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsltu_vi_nxv8i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, -10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv8i8.i8( - %0, - i8 -9, - i64 %1) - - ret %a -} - -define @intrinsic_vmsltu_mask_vi_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vi v10, v8, -9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv8i8.i8( - %0, - %1, - i8 -8, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsltu_vi_nxv16i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, -8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv16i8.i8( - %0, - i8 -7, - i64 %1) - - ret %a -} - -define @intrinsic_vmsltu_mask_vi_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsleu.vi v11, v8, -7, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv16i8.i8( - %0, - %1, - i8 -6, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsltu_vi_nxv32i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vi_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, -6 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv32i8.i8( - %0, - i8 -5, - i64 %1) - - ret %a -} - -define @intrinsic_vmsltu_mask_vi_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsleu.vi v13, v8, -5, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv32i8.i8( - %0, - %1, - i8 -4, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsltu_vi_nxv1i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, -4 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv1i16.i16( - %0, - i16 -3, - i64 %1) - - ret %a -} - -define @intrinsic_vmsltu_mask_vi_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16: -; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vi v10, v8, -3, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv1i16.i16( - %0, - %1, - i16 -2, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsltu_vi_nxv2i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, -2 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv2i16.i16( - %0, - i16 -1, - i64 %1) - - ret %a -} - -define @intrinsic_vmsltu_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vx v10, v8, zero, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv2i16.i16( - %0, - %1, - i16 0, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsltu_vi_nxv4i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmsltu.vx v0, v8, zero -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv4i16.i16( - %0, - i16 0, - i64 %1) - - ret %a -} - -define @intrinsic_vmsltu_mask_vi_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vi v10, v8, 0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv4i16.i16( - %0, - %1, - i16 1, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsltu_vi_nxv8i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 1 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv8i16.i16( - %0, - i16 2, - i64 %1) - - ret %a -} - -define @intrinsic_vmsltu_mask_vi_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsleu.vi v11, v8, 2, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv8i16.i16( - %0, - %1, - i16 3, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsltu_vi_nxv16i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 3 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv16i16.i16( - %0, - i16 4, - i64 %1) - - ret %a -} - -define @intrinsic_vmsltu_mask_vi_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsleu.vi v13, v8, 4, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv16i16.i16( - %0, - %1, - i16 5, - %2, - i64 
%3) - - ret %a -} - -define @intrinsic_vmsltu_vi_nxv1i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 5 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv1i32.i32( - %0, - i32 6, - i64 %1) - - ret %a -} - -define @intrinsic_vmsltu_mask_vi_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vi v10, v8, 6, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv1i32.i32( - %0, - %1, - i32 7, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsltu_vi_nxv2i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 7 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv2i32.i32( - %0, - i32 8, - i64 %1) - - ret %a -} - -define @intrinsic_vmsltu_mask_vi_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vi v10, v8, 8, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv2i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsltu_vi_nxv4i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv4i32.i32( - %0, - i32 10, - i64 %1) - - ret %a -} - -define @intrinsic_vmsltu_mask_vi_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsleu.vi v11, v8, 10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv4i32.i32( - %0, - %1, - i32 11, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsltu_vi_nxv8i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv8i32.i32( - %0, - i32 12, - i64 %1) - - ret %a -} - -define @intrinsic_vmsltu_mask_vi_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsleu.vi v13, v8, 12, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv8i32.i32( - %0, - %1, - i32 13, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsltu_vi_nxv1i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 13 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vmsltu.nxv1i64.i64( - %0, - i64 14, - i64 %1) - - ret %a -} - -define @intrinsic_vmsltu_mask_vi_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsleu.vi v10, v8, 14, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv1i64.i64( - %0, - %1, - i64 15, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsltu_vi_nxv2i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, 15 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv2i64.i64( - %0, - i64 16, - i64 %1) - - ret %a -} - -define @intrinsic_vmsltu_mask_vi_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsleu.vi v11, v8, -16, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv2i64.i64( - %0, - %1, - i64 -15, - %2, - i64 %3) - - ret %a -} - -define @intrinsic_vmsltu_vi_nxv4i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmsleu.vi v0, v8, -15 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.nxv4i64.i64( - %0, - i64 -14, - i64 %1) - - ret %a -} - -define @intrinsic_vmsltu_mask_vi_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsleu.vi v13, v8, -14, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsltu.mask.nxv4i64.i64( - %0, - %1, - i64 -13, - %2, - i64 %3) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll similarity index 85% rename from llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmsltu.ll index 009d196..f3d0ca3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vmsltu.nxv1i8( , , - i32); + iXLen); -define @intrinsic_vmsltu_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv1i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -26,9 +28,9 @@ declare @llvm.riscv.vmsltu.mask.nxv1i8( , , , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8( %0, 
%1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -42,13 +44,13 @@ entry: %mask = call @llvm.riscv.vmsltu.nxv1i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsltu.mask.nxv1i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -56,9 +58,9 @@ entry: declare @llvm.riscv.vmsltu.nxv2i8( , , - i32); + iXLen); -define @intrinsic_vmsltu_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -68,7 +70,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv2i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -78,9 +80,9 @@ declare @llvm.riscv.vmsltu.mask.nxv2i8( , , , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -94,13 +96,13 @@ entry: %mask = call @llvm.riscv.vmsltu.nxv2i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsltu.mask.nxv2i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -108,9 +110,9 @@ entry: declare @llvm.riscv.vmsltu.nxv4i8( , , - i32); + iXLen); -define @intrinsic_vmsltu_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -120,7 +122,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv4i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -130,9 +132,9 @@ declare @llvm.riscv.vmsltu.mask.nxv4i8( , , , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -146,13 +148,13 @@ entry: %mask = call @llvm.riscv.vmsltu.nxv4i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsltu.mask.nxv4i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -160,9 +162,9 @@ entry: declare @llvm.riscv.vmsltu.nxv8i8( , , - i32); + iXLen); -define @intrinsic_vmsltu_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -172,7 +174,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv8i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -182,9 +184,9 @@ declare @llvm.riscv.vmsltu.mask.nxv8i8( , , , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -198,13 +200,13 @@ entry: %mask = call @llvm.riscv.vmsltu.nxv8i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsltu.mask.nxv8i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -212,9 +214,9 @@ entry: declare 
@llvm.riscv.vmsltu.nxv16i8( , , - i32); + iXLen); -define @intrinsic_vmsltu_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -224,7 +226,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv16i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -234,9 +236,9 @@ declare @llvm.riscv.vmsltu.mask.nxv16i8( , , , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -250,13 +252,13 @@ entry: %mask = call @llvm.riscv.vmsltu.nxv16i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsltu.mask.nxv16i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -264,9 +266,9 @@ entry: declare @llvm.riscv.vmsltu.nxv32i8( , , - i32); + iXLen); -define @intrinsic_vmsltu_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -276,7 +278,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv32i8( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -286,9 +288,9 @@ declare @llvm.riscv.vmsltu.mask.nxv32i8( , , , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -302,13 +304,13 @@ entry: %mask = call @llvm.riscv.vmsltu.nxv32i8( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsltu.mask.nxv32i8( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -316,9 +318,9 @@ entry: declare @llvm.riscv.vmsltu.nxv1i16( , , - i32); + iXLen); -define @intrinsic_vmsltu_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -328,7 +330,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv1i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -338,9 +340,9 @@ declare @llvm.riscv.vmsltu.mask.nxv1i16( , , , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -354,13 +356,13 @@ entry: %mask = call @llvm.riscv.vmsltu.nxv1i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsltu.mask.nxv1i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -368,9 +370,9 @@ entry: declare @llvm.riscv.vmsltu.nxv2i16( , , - i32); + iXLen); -define @intrinsic_vmsltu_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -380,7 +382,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv2i16( %0, %1, - i32 %2) + 
iXLen %2) ret %a } @@ -390,9 +392,9 @@ declare @llvm.riscv.vmsltu.mask.nxv2i16( , , , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -406,13 +408,13 @@ entry: %mask = call @llvm.riscv.vmsltu.nxv2i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsltu.mask.nxv2i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -420,9 +422,9 @@ entry: declare @llvm.riscv.vmsltu.nxv4i16( , , - i32); + iXLen); -define @intrinsic_vmsltu_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -432,7 +434,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv4i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vmsltu.mask.nxv4i16( , , , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -458,13 +460,13 @@ entry: %mask = call @llvm.riscv.vmsltu.nxv4i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsltu.mask.nxv4i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -472,9 +474,9 @@ entry: declare @llvm.riscv.vmsltu.nxv8i16( , , - i32); + iXLen); -define @intrinsic_vmsltu_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -484,7 +486,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv8i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -494,9 +496,9 @@ declare @llvm.riscv.vmsltu.mask.nxv8i16( , , , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -510,13 +512,13 @@ entry: %mask = call @llvm.riscv.vmsltu.nxv8i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsltu.mask.nxv8i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -524,9 +526,9 @@ entry: declare @llvm.riscv.vmsltu.nxv16i16( , , - i32); + iXLen); -define @intrinsic_vmsltu_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -536,7 +538,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv16i16( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -546,9 +548,9 @@ declare @llvm.riscv.vmsltu.mask.nxv16i16( , , , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, 
e16, m4, ta, mu @@ -562,13 +564,13 @@ entry: %mask = call @llvm.riscv.vmsltu.nxv16i16( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsltu.mask.nxv16i16( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -576,9 +578,9 @@ entry: declare @llvm.riscv.vmsltu.nxv1i32( , , - i32); + iXLen); -define @intrinsic_vmsltu_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -588,7 +590,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv1i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -598,9 +600,9 @@ declare @llvm.riscv.vmsltu.mask.nxv1i32( , , , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -614,13 +616,13 @@ entry: %mask = call @llvm.riscv.vmsltu.nxv1i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsltu.mask.nxv1i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -628,9 +630,9 @@ entry: declare @llvm.riscv.vmsltu.nxv2i32( , , - i32); + iXLen); -define @intrinsic_vmsltu_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -640,7 +642,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv2i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -650,9 +652,9 @@ declare @llvm.riscv.vmsltu.mask.nxv2i32( , , , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -666,13 +668,13 @@ entry: %mask = call @llvm.riscv.vmsltu.nxv2i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsltu.mask.nxv2i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -680,9 +682,9 @@ entry: declare @llvm.riscv.vmsltu.nxv4i32( , , - i32); + iXLen); -define @intrinsic_vmsltu_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -692,7 +694,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv4i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -702,9 +704,9 @@ declare @llvm.riscv.vmsltu.mask.nxv4i32( , , , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -718,13 +720,13 @@ entry: %mask = call @llvm.riscv.vmsltu.nxv4i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsltu.mask.nxv4i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -732,9 +734,9 @@ entry: declare @llvm.riscv.vmsltu.nxv8i32( , , - i32); + iXLen); -define @intrinsic_vmsltu_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +define 
@intrinsic_vmsltu_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -744,7 +746,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv8i32( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -754,9 +756,9 @@ declare @llvm.riscv.vmsltu.mask.nxv8i32( , , , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -770,13 +772,13 @@ entry: %mask = call @llvm.riscv.vmsltu.nxv8i32( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsltu.mask.nxv8i32( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -784,9 +786,9 @@ entry: declare @llvm.riscv.vmsltu.nxv1i64( , , - i32); + iXLen); -define @intrinsic_vmsltu_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -796,7 +798,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv1i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vmsltu.mask.nxv1i64( , , , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -822,13 +824,13 @@ entry: %mask = call @llvm.riscv.vmsltu.nxv1i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsltu.mask.nxv1i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -836,9 +838,9 @@ entry: declare @llvm.riscv.vmsltu.nxv2i64( , , - i32); + iXLen); -define @intrinsic_vmsltu_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -848,7 +850,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv2i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -858,9 +860,9 @@ declare @llvm.riscv.vmsltu.mask.nxv2i64( , , , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -874,13 +876,13 @@ entry: %mask = call @llvm.riscv.vmsltu.nxv2i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsltu.mask.nxv2i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -888,9 +890,9 @@ entry: declare @llvm.riscv.vmsltu.nxv4i64( , , - i32); + iXLen); -define @intrinsic_vmsltu_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -900,7 +902,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv4i64( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -910,9 +912,9 @@ declare @llvm.riscv.vmsltu.mask.nxv4i64( , , , - i32); + iXLen); -define 
@intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -926,13 +928,13 @@ entry: %mask = call @llvm.riscv.vmsltu.nxv4i64( %1, %2, - i32 %4) + iXLen %4) %a = call @llvm.riscv.vmsltu.mask.nxv4i64( %0, %2, %3, %mask, - i32 %4) + iXLen %4) ret %a } @@ -940,9 +942,9 @@ entry: declare @llvm.riscv.vmsltu.nxv1i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsltu_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -952,7 +954,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv1i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -962,9 +964,9 @@ declare @llvm.riscv.vmsltu.mask.nxv1i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -979,7 +981,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -987,9 +989,9 @@ entry: declare @llvm.riscv.vmsltu.nxv2i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsltu_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -999,7 +1001,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv2i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1009,9 +1011,9 @@ declare @llvm.riscv.vmsltu.mask.nxv2i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1026,7 +1028,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1034,9 +1036,9 @@ entry: declare @llvm.riscv.vmsltu.nxv4i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsltu_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1046,7 +1048,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv4i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1056,9 +1058,9 @@ declare @llvm.riscv.vmsltu.mask.nxv4i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1073,7 +1075,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1081,9 +1083,9 @@ entry: declare @llvm.riscv.vmsltu.nxv8i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsltu_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 
@@ -1093,7 +1095,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv8i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1103,9 +1105,9 @@ declare @llvm.riscv.vmsltu.mask.nxv8i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1120,7 +1122,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1128,9 +1130,9 @@ entry: declare @llvm.riscv.vmsltu.nxv16i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsltu_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1140,7 +1142,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv16i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1150,9 +1152,9 @@ declare @llvm.riscv.vmsltu.mask.nxv16i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1167,7 +1169,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1175,9 +1177,9 @@ entry: declare @llvm.riscv.vmsltu.nxv32i8.i8( , i8, - i32); + iXLen); -define @intrinsic_vmsltu_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -1187,7 +1189,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv32i8.i8( %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1197,9 +1199,9 @@ declare @llvm.riscv.vmsltu.mask.nxv32i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1214,7 +1216,7 @@ entry: %1, i8 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vmsltu.nxv1i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsltu_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv1i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1244,9 +1246,9 @@ declare @llvm.riscv.vmsltu.mask.nxv1i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1261,7 +1263,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1269,9 +1271,9 @@ entry: declare @llvm.riscv.vmsltu.nxv2i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsltu_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -1281,7 +1283,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv2i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1291,9 +1293,9 @@ declare @llvm.riscv.vmsltu.mask.nxv2i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1308,7 +1310,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1316,9 +1318,9 @@ entry: declare @llvm.riscv.vmsltu.nxv4i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsltu_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1328,7 +1330,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv4i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1338,9 +1340,9 @@ declare @llvm.riscv.vmsltu.mask.nxv4i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1355,7 +1357,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1363,9 +1365,9 @@ entry: declare @llvm.riscv.vmsltu.nxv8i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsltu_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1375,7 +1377,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv8i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1385,9 +1387,9 @@ declare @llvm.riscv.vmsltu.mask.nxv8i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1402,7 +1404,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1410,9 +1412,9 @@ entry: declare @llvm.riscv.vmsltu.nxv16i16.i16( , i16, - i32); + iXLen); -define @intrinsic_vmsltu_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -1422,7 +1424,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv16i16.i16( %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1432,9 +1434,9 @@ declare @llvm.riscv.vmsltu.mask.nxv16i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1449,7 +1451,7 @@ entry: %1, i16 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1457,9 +1459,9 @@ entry: declare 
@llvm.riscv.vmsltu.nxv1i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsltu_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1469,7 +1471,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv1i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1479,9 +1481,9 @@ declare @llvm.riscv.vmsltu.mask.nxv1i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1496,7 +1498,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1504,9 +1506,9 @@ entry: declare @llvm.riscv.vmsltu.nxv2i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsltu_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1516,7 +1518,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv2i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1526,9 +1528,9 @@ declare @llvm.riscv.vmsltu.mask.nxv2i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1543,7 +1545,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1551,9 +1553,9 @@ entry: declare @llvm.riscv.vmsltu.nxv4i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsltu_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1563,7 +1565,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv4i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1573,9 +1575,9 @@ declare @llvm.riscv.vmsltu.mask.nxv4i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1590,7 +1592,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1598,9 +1600,9 @@ entry: declare @llvm.riscv.vmsltu.nxv8i32.i32( , i32, - i32); + iXLen); -define @intrinsic_vmsltu_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vmsltu_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1610,7 +1612,7 @@ entry: %a = call @llvm.riscv.vmsltu.nxv8i32.i32( %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1620,9 +1622,9 @@ declare @llvm.riscv.vmsltu.mask.nxv8i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vmsltu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vmsltu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsltu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1637,7 +1639,7 @@ entry: %1, i32 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1645,25 +1647,31 @@ entry: declare @llvm.riscv.vmsltu.nxv1i64.i64( , i64, - i32); - -define @intrinsic_vmsltu_vx_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vmsltu.vv v0, v8, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsltu_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsltu_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vmsltu.vv v0, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsltu_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsltu.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.nxv1i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1673,30 +1681,39 @@ declare @llvm.riscv.vmsltu.mask.nxv1i64.i64( , i64, , - i32); - -define @intrinsic_vmsltu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v11, (a0), zero -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsltu.vv v10, v8, v11, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsltu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v11, (a0), zero +; RV32-NEXT: vmv1r.v v10, v0 +; RV32-NEXT: vmv1r.v v0, v9 +; RV32-NEXT: vmsltu.vv v10, v8, v11, v0.t +; RV32-NEXT: vmv.v.v v0, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v10, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vmv1r.v v0, v9 +; RV64-NEXT: vmsltu.vx v10, v8, a0, v0.t +; RV64-NEXT: vmv.v.v v0, v10 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1704,25 +1721,31 @@ entry: declare @llvm.riscv.vmsltu.nxv2i64.i64( , i64, - i32); - -define @intrinsic_vmsltu_vx_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vmsltu.vv v0, v8, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + 
iXLen); + +define @intrinsic_vmsltu_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsltu_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vmsltu.vv v0, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsltu_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmsltu.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.nxv2i64.i64( %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1732,30 +1755,39 @@ declare @llvm.riscv.vmsltu.mask.nxv2i64.i64( , i64, , - i32); - -define @intrinsic_vmsltu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsltu.vv v11, v8, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsltu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmv1r.v v11, v0 +; RV32-NEXT: vmv1r.v v0, v10 +; RV32-NEXT: vmsltu.vv v11, v8, v12, v0.t +; RV32-NEXT: vmv1r.v v0, v11 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v11, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vmv1r.v v0, v10 +; RV64-NEXT: vmsltu.vx v11, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v11 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } @@ -1763,25 +1795,31 @@ entry: declare @llvm.riscv.vmsltu.nxv4i64.i64( , i64, - i32); - -define @intrinsic_vmsltu_vx_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmsltu.vv v0, v8, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsltu_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsltu_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmsltu.vv v0, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsltu_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsltu.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.nxv4i64.i64( %0, i64 %1, - i32 %2) + 
iXLen %2) ret %a } @@ -1791,35 +1829,44 @@ declare @llvm.riscv.vmsltu.mask.nxv4i64.i64( , i64, , - i32); - -define @intrinsic_vmsltu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsltu.vv v13, v8, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsltu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmv1r.v v13, v0 +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: vmsltu.vv v13, v8, v16, v0.t +; RV32-NEXT: vmv1r.v v0, v13 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v13, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: vmsltu.vx v13, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v13 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i32 %4) + iXLen %4) ret %a } -define @intrinsic_vmsltu_vi_nxv1i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsltu_vi_nxv1i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -1829,12 +1876,12 @@ entry: %a = call @llvm.riscv.vmsltu.nxv1i8.i8( %0, i8 -15, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsltu_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsltu_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1849,12 +1896,12 @@ entry: %1, i8 -14, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsltu_vi_nxv2i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsltu_vi_nxv2i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -1864,12 +1911,12 @@ entry: %a = call @llvm.riscv.vmsltu.nxv2i8.i8( %0, i8 -13, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsltu_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsltu_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1884,12 +1931,12 @@ entry: %1, i8 -12, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsltu_vi_nxv4i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsltu_vi_nxv4i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -1899,12 +1946,12 @@ entry: %a = call @llvm.riscv.vmsltu.nxv4i8.i8( %0, i8 -11, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsltu_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsltu_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1919,12 +1966,12 @@ entry: %1, i8 -10, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsltu_vi_nxv8i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsltu_vi_nxv8i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -1934,12 +1981,12 @@ entry: %a = call @llvm.riscv.vmsltu.nxv8i8.i8( %0, i8 -9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsltu_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsltu_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1954,12 +2001,12 @@ entry: %1, i8 -8, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsltu_vi_nxv16i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsltu_vi_nxv16i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -1969,12 +2016,12 @@ entry: %a = call @llvm.riscv.vmsltu.nxv16i8.i8( %0, i8 -7, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsltu_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsltu_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1989,12 +2036,12 @@ entry: %1, i8 -6, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsltu_vi_nxv32i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vmsltu_vi_nxv32i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -2004,12 +2051,12 @@ entry: %a = call @llvm.riscv.vmsltu.nxv32i8.i8( %0, i8 -5, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsltu_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsltu_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2024,12 +2071,12 @@ entry: %1, i8 -4, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsltu_vi_nxv1i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsltu_vi_nxv1i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -2039,12 +2086,12 @@ entry: %a = call @llvm.riscv.vmsltu.nxv1i16.i16( %0, i16 -3, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsltu_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsltu_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2059,12 +2106,12 @@ entry: %1, i16 -2, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsltu_vi_nxv2i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsltu_vi_nxv2i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -2074,12 +2121,12 @@ entry: %a = call @llvm.riscv.vmsltu.nxv2i16.i16( %0, i16 -1, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsltu_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsltu_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2094,12 +2141,12 @@ entry: %1, i16 0, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsltu_vi_nxv4i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsltu_vi_nxv4i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -2109,12 +2156,12 @@ entry: %a = call @llvm.riscv.vmsltu.nxv4i16.i16( %0, i16 0, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsltu_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsltu_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2129,12 +2176,12 @@ entry: %1, i16 1, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsltu_vi_nxv8i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsltu_vi_nxv8i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -2144,12 +2191,12 @@ entry: %a = call @llvm.riscv.vmsltu.nxv8i16.i16( %0, i16 2, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsltu_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsltu_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2164,12 +2211,12 @@ entry: %1, i16 3, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsltu_vi_nxv16i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vmsltu_vi_nxv16i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -2179,12 +2226,12 @@ entry: %a = call @llvm.riscv.vmsltu.nxv16i16.i16( %0, i16 4, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsltu_mask_vi_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsltu_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2199,12 +2246,12 @@ entry: %1, i16 5, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsltu_vi_nxv1i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsltu_vi_nxv1i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -2214,12 +2261,12 @@ entry: %a = call @llvm.riscv.vmsltu.nxv1i32.i32( %0, i32 6, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsltu_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsltu_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2234,12 +2281,12 @@ entry: %1, i32 7, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsltu_vi_nxv2i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsltu_vi_nxv2i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -2249,12 +2296,12 @@ entry: %a = call @llvm.riscv.vmsltu.nxv2i32.i32( %0, i32 8, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsltu_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsltu_mask_vi_nxv2i32_i32( %0, 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2269,12 +2316,12 @@ entry: %1, i32 9, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsltu_vi_nxv4i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsltu_vi_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -2284,12 +2331,12 @@ entry: %a = call @llvm.riscv.vmsltu.nxv4i32.i32( %0, i32 10, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsltu_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsltu_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2304,12 +2351,12 @@ entry: %1, i32 11, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsltu_vi_nxv8i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vmsltu_vi_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -2319,12 +2366,12 @@ entry: %a = call @llvm.riscv.vmsltu.nxv8i32.i32( %0, i32 12, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsltu_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsltu_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2339,12 +2386,12 @@ entry: %1, i32 13, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsltu_vi_nxv1i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmsltu_vi_nxv1i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -2354,12 +2401,12 @@ entry: %a = call @llvm.riscv.vmsltu.nxv1i64.i64( %0, i64 14, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsltu_mask_vi_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsltu_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2374,12 +2421,12 @@ entry: %1, i64 15, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsltu_vi_nxv2i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmsltu_vi_nxv2i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -2389,12 +2436,12 @@ entry: %a = call @llvm.riscv.vmsltu.nxv2i64.i64( %0, i64 16, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsltu_mask_vi_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsltu_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2409,12 +2456,12 @@ entry: %1, i64 -15, %2, - i32 %3) + iXLen %3) ret %a } -define @intrinsic_vmsltu_vi_nxv4i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vmsltu_vi_nxv4i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -2424,12 +2471,12 @@ entry: %a = call @llvm.riscv.vmsltu.nxv4i64.i64( %0, i64 -14, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vmsltu_mask_vi_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { +define 
@intrinsic_vmsltu_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2444,7 +2491,7 @@ entry: %1, i64 -13, %2, - i32 %3) + iXLen %3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll deleted file mode 100644 index 9451533..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll +++ /dev/null @@ -1,2450 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmsne.nxv1i8( - , - , - i32); - -define @intrinsic_vmsne_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmsne.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv1i8( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv1i8( - , - , - , - , - i32); - -define @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmsne.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsne.nxv1i8( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsne.mask.nxv1i8( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv2i8( - , - , - i32); - -define @intrinsic_vmsne_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmsne.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv2i8( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv2i8( - , - , - , - , - i32); - -define @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmsne.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsne.nxv2i8( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsne.mask.nxv2i8( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv4i8( - , - , - i32); - -define @intrinsic_vmsne_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmsne.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv4i8( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv4i8( - , - , - , - , - i32); - -define @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmsne.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; 
CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsne.nxv4i8( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsne.mask.nxv4i8( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv8i8( - , - , - i32); - -define @intrinsic_vmsne_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmsne.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv8i8( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv8i8( - , - , - , - , - i32); - -define @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmsne.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsne.nxv8i8( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsne.mask.nxv8i8( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv16i8( - , - , - i32); - -define @intrinsic_vmsne_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmsne.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv16i8( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv16i8( - , - , - , - , - i32); - -define @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmsne.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsne.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsne.nxv16i8( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsne.mask.nxv16i8( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv32i8( - , - , - i32); - -define @intrinsic_vmsne_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vv_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmsne.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv32i8( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv32i8( - , - , - , - , - i32); - -define @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmsne.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsne.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsne.nxv32i8( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsne.mask.nxv32i8( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv1i16( - , - , - i32); - -define @intrinsic_vmsne_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: 
vmsne.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv1i16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv1i16( - , - , - , - , - i32); - -define @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmsne.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsne.nxv1i16( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsne.mask.nxv1i16( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv2i16( - , - , - i32); - -define @intrinsic_vmsne_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmsne.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv2i16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv2i16( - , - , - , - , - i32); - -define @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmsne.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsne.nxv2i16( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsne.mask.nxv2i16( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv4i16( - , - , - i32); - -define @intrinsic_vmsne_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmsne.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv4i16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv4i16( - , - , - , - , - i32); - -define @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmsne.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsne.nxv4i16( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsne.mask.nxv4i16( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv8i16( - , - , - i32); - -define @intrinsic_vmsne_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmsne.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv8i16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv8i16( - , - , - , - , - i32); - -define @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmsne.vv 
v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsne.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsne.nxv8i16( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsne.mask.nxv8i16( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv16i16( - , - , - i32); - -define @intrinsic_vmsne_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmsne.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv16i16( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv16i16( - , - , - , - , - i32); - -define @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vmsne.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsne.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsne.nxv16i16( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsne.mask.nxv16i16( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv1i32( - , - , - i32); - -define @intrinsic_vmsne_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmsne.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv1i32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv1i32( - , - , - , - , - i32); - -define @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmsne.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsne.nxv1i32( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsne.mask.nxv1i32( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv2i32( - , - , - i32); - -define @intrinsic_vmsne_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmsne.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv2i32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv2i32( - , - , - , - , - i32); - -define @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmsne.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsne.nxv2i32( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsne.mask.nxv2i32( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv4i32( - , - , - i32); - -define 
@intrinsic_vmsne_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmsne.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv4i32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv4i32( - , - , - , - , - i32); - -define @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmsne.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsne.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsne.nxv4i32( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsne.mask.nxv4i32( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv8i32( - , - , - i32); - -define @intrinsic_vmsne_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmsne.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv8i32( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv8i32( - , - , - , - , - i32); - -define @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmsne.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsne.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsne.nxv8i32( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsne.mask.nxv8i32( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv1i64( - , - , - i32); - -define @intrinsic_vmsne_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmsne.vv v0, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv1i64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv1i64( - , - , - , - , - i32); - -define @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmsne.vv v8, v8, v9 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv.v.v v0, v11 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsne.nxv1i64( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsne.mask.nxv1i64( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv2i64( - , - , - i32); - -define @intrinsic_vmsne_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmsne.vv v0, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv2i64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv2i64( - , - , - , - , - i32); - -define 
@intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmsne.vv v14, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v14 -; CHECK-NEXT: vmsne.vv v8, v10, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsne.nxv2i64( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsne.mask.nxv2i64( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv4i64( - , - , - i32); - -define @intrinsic_vmsne_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmsne.vv v0, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv4i64( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv4i64( - , - , - , - , - i32); - -define @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmsne.vv v20, v8, v12 -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv1r.v v0, v20 -; CHECK-NEXT: vmsne.vv v8, v12, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %mask = call @llvm.riscv.vmsne.nxv4i64( - %1, - %2, - i32 %4) - %a = call @llvm.riscv.vmsne.mask.nxv4i64( - %0, - %2, - %3, - %mask, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv1i8.i8( - , - i8, - i32); - -define @intrinsic_vmsne_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vmsne.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv1i8.i8( - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv1i8.i8( - , - , - i8, - , - i32); - -define @intrinsic_vmsne_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv2i8.i8( - , - i8, - i32); - -define @intrinsic_vmsne_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vmsne.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv2i8.i8( - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv2i8.i8( - , - , - i8, - , - i32); - -define @intrinsic_vmsne_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv4i8.i8( - , - i8, - i32); - -define 
@intrinsic_vmsne_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vmsne.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv4i8.i8( - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv4i8.i8( - , - , - i8, - , - i32); - -define @intrinsic_vmsne_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv8i8.i8( - , - i8, - i32); - -define @intrinsic_vmsne_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vmsne.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv8i8.i8( - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv8i8.i8( - , - , - i8, - , - i32); - -define @intrinsic_vmsne_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv16i8.i8( - , - i8, - i32); - -define @intrinsic_vmsne_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vmsne.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv16i8.i8( - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv16i8.i8( - , - , - i8, - , - i32); - -define @intrinsic_vmsne_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsne.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv32i8.i8( - , - i8, - i32); - -define @intrinsic_vmsne_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vmsne.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv32i8.i8( - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv32i8.i8( - , - , - i8, - , - i32); - -define @intrinsic_vmsne_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsne.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, 
v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv1i16.i16( - , - i16, - i32); - -define @intrinsic_vmsne_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vmsne.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv1i16.i16( - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv1i16.i16( - , - , - i16, - , - i32); - -define @intrinsic_vmsne_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv2i16.i16( - , - i16, - i32); - -define @intrinsic_vmsne_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vmsne.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv2i16.i16( - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv2i16.i16( - , - , - i16, - , - i32); - -define @intrinsic_vmsne_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv4i16.i16( - , - i16, - i32); - -define @intrinsic_vmsne_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vmsne.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv4i16.i16( - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv4i16.i16( - , - , - i16, - , - i32); - -define @intrinsic_vmsne_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv8i16.i16( - , - i16, - i32); - -define @intrinsic_vmsne_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vmsne.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv8i16.i16( - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv8i16.i16( - , - , - i16, - , - i32); - -define @intrinsic_vmsne_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i32 
%4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsne.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv16i16.i16( - , - i16, - i32); - -define @intrinsic_vmsne_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vmsne.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv16i16.i16( - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv16i16.i16( - , - , - i16, - , - i32); - -define @intrinsic_vmsne_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsne.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv1i32.i32( - , - i32, - i32); - -define @intrinsic_vmsne_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vmsne.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv1i32.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv1i32.i32( - , - , - i32, - , - i32); - -define @intrinsic_vmsne_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv2i32.i32( - , - i32, - i32); - -define @intrinsic_vmsne_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vmsne.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv2i32.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv2i32.i32( - , - , - i32, - , - i32); - -define @intrinsic_vmsne_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv4i32.i32( - , - i32, - i32); - -define @intrinsic_vmsne_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; 
CHECK-NEXT: vmsne.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv4i32.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv4i32.i32( - , - , - i32, - , - i32); - -define @intrinsic_vmsne_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsne.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv8i32.i32( - , - i32, - i32); - -define @intrinsic_vmsne_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vmsne.vx v0, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv8i32.i32( - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv8i32.i32( - , - , - i32, - , - i32); - -define @intrinsic_vmsne_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsne.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv1i64.i64( - , - i64, - i32); - -define @intrinsic_vmsne_vx_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vmsne.vv v0, v8, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv1i64.i64( - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv1i64.i64( - , - , - i64, - , - i32); - -define @intrinsic_vmsne_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v11, (a0), zero -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vv v10, v8, v11, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv2i64.i64( - , - i64, - i32); - -define @intrinsic_vmsne_vx_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vmsne.vv v0, v8, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv2i64.i64( - %0, - 
i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv2i64.i64( - , - , - i64, - , - i32); - -define @intrinsic_vmsne_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsne.vv v11, v8, v12, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vmsne.nxv4i64.i64( - , - i64, - i32); - -define @intrinsic_vmsne_vx_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vmsne.vv v0, v8, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv4i64.i64( - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmsne.mask.nxv4i64.i64( - , - , - i64, - , - i32); - -define @intrinsic_vmsne_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsne.vv v13, v8, v16, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4) - - ret %a -} - -define @intrinsic_vmsne_vi_nxv1i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmsne.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv1i8.i8( - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsne_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv1i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsne_vi_nxv2i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmsne.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv2i8.i8( - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsne_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t -; CHECK-NEXT: 
vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv2i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsne_vi_nxv4i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmsne.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv4i8.i8( - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsne_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv4i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsne_vi_nxv8i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmsne.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv8i8.i8( - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsne_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv8i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsne_vi_nxv16i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmsne.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv16i8.i8( - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsne_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsne.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv16i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsne_vi_nxv32i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vi_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmsne.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv32i8.i8( - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsne_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsne.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv32i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsne_vi_nxv1i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vmsne.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - 
%a = call @llvm.riscv.vmsne.nxv1i16.i16( - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsne_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv1i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsne_vi_nxv2i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vmsne.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv2i16.i16( - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsne_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv2i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsne_vi_nxv4i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmsne.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv4i16.i16( - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsne_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv4i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsne_vi_nxv8i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmsne.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv8i16.i16( - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsne_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsne.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv8i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsne_vi_nxv16i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmsne.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv16i16.i16( - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsne_mask_vi_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: 
vmv1r.v v0, v12 -; CHECK-NEXT: vmsne.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv16i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsne_vi_nxv1i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vmsne.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv1i32.i32( - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsne_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv1i32.i32( - %0, - %1, - i32 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsne_vi_nxv2i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmsne.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv2i32.i32( - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsne_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv2i32.i32( - %0, - %1, - i32 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsne_vi_nxv4i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vmsne.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv4i32.i32( - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsne_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsne.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv4i32.i32( - %0, - %1, - i32 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsne_vi_nxv8i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmsne.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv8i32.i32( - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsne_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsne.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv8i32.i32( - %0, - %1, - i32 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsne_vi_nxv1i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i64_i64: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vmsne.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv1i64.i64( - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsne_mask_vi_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv1i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsne_vi_nxv2i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmsne.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv2i64.i64( - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsne_mask_vi_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsne.vi v11, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv2i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3) - - ret %a -} - -define @intrinsic_vmsne_vi_nxv4i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmsne.vi v0, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.nxv4i64.i64( - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vmsne_mask_vi_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsne.vi v13, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsne.mask.nxv4i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne.ll similarity index 84% rename from llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll rename to llvm/test/CodeGen/RISCV/rvv/vmsne.ll index 600881a..ea7ca05 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsne.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vmsne.nxv1i8( , , - i64); + iXLen); -define @intrinsic_vmsne_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsne_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -16,7 +18,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv1i8( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -26,9 +28,9 @@ declare 
@llvm.riscv.vmsne.mask.nxv1i8( , , , - i64); + iXLen); -define @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -42,13 +44,13 @@ entry: %mask = call @llvm.riscv.vmsne.nxv1i8( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsne.mask.nxv1i8( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -56,9 +58,9 @@ entry: declare @llvm.riscv.vmsne.nxv2i8( , , - i64); + iXLen); -define @intrinsic_vmsne_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsne_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -68,7 +70,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv2i8( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -78,9 +80,9 @@ declare @llvm.riscv.vmsne.mask.nxv2i8( , , , - i64); + iXLen); -define @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -94,13 +96,13 @@ entry: %mask = call @llvm.riscv.vmsne.nxv2i8( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsne.mask.nxv2i8( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -108,9 +110,9 @@ entry: declare @llvm.riscv.vmsne.nxv4i8( , , - i64); + iXLen); -define @intrinsic_vmsne_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsne_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -120,7 +122,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv4i8( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -130,9 +132,9 @@ declare @llvm.riscv.vmsne.mask.nxv4i8( , , , - i64); + iXLen); -define @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -146,13 +148,13 @@ entry: %mask = call @llvm.riscv.vmsne.nxv4i8( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsne.mask.nxv4i8( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -160,9 +162,9 @@ entry: declare @llvm.riscv.vmsne.nxv8i8( , , - i64); + iXLen); -define @intrinsic_vmsne_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsne_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -172,7 +174,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv8i8( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -182,9 +184,9 @@ declare @llvm.riscv.vmsne.mask.nxv8i8( , , , - i64); + iXLen); -define @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -198,13 +200,13 @@ entry: %mask = call @llvm.riscv.vmsne.nxv8i8( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsne.mask.nxv8i8( %0, %2, %3, 
%mask, - i64 %4) + iXLen %4) ret %a } @@ -212,9 +214,9 @@ entry: declare @llvm.riscv.vmsne.nxv16i8( , , - i64); + iXLen); -define @intrinsic_vmsne_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsne_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -224,7 +226,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv16i8( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -234,9 +236,9 @@ declare @llvm.riscv.vmsne.mask.nxv16i8( , , , - i64); + iXLen); -define @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -250,13 +252,13 @@ entry: %mask = call @llvm.riscv.vmsne.nxv16i8( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsne.mask.nxv16i8( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -264,9 +266,9 @@ entry: declare @llvm.riscv.vmsne.nxv32i8( , , - i64); + iXLen); -define @intrinsic_vmsne_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsne_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -276,7 +278,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv32i8( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -286,9 +288,9 @@ declare @llvm.riscv.vmsne.mask.nxv32i8( , , , - i64); + iXLen); -define @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -302,13 +304,13 @@ entry: %mask = call @llvm.riscv.vmsne.nxv32i8( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsne.mask.nxv32i8( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -316,9 +318,9 @@ entry: declare @llvm.riscv.vmsne.nxv1i16( , , - i64); + iXLen); -define @intrinsic_vmsne_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsne_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -328,7 +330,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv1i16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -338,9 +340,9 @@ declare @llvm.riscv.vmsne.mask.nxv1i16( , , , - i64); + iXLen); -define @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -354,13 +356,13 @@ entry: %mask = call @llvm.riscv.vmsne.nxv1i16( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsne.mask.nxv1i16( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -368,9 +370,9 @@ entry: declare @llvm.riscv.vmsne.nxv2i16( , , - i64); + iXLen); -define @intrinsic_vmsne_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsne_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -380,7 +382,7 @@ entry: %a = call 
@llvm.riscv.vmsne.nxv2i16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -390,9 +392,9 @@ declare @llvm.riscv.vmsne.mask.nxv2i16( , , , - i64); + iXLen); -define @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -406,13 +408,13 @@ entry: %mask = call @llvm.riscv.vmsne.nxv2i16( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsne.mask.nxv2i16( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -420,9 +422,9 @@ entry: declare @llvm.riscv.vmsne.nxv4i16( , , - i64); + iXLen); -define @intrinsic_vmsne_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsne_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -432,7 +434,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv4i16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -442,9 +444,9 @@ declare @llvm.riscv.vmsne.mask.nxv4i16( , , , - i64); + iXLen); -define @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -458,13 +460,13 @@ entry: %mask = call @llvm.riscv.vmsne.nxv4i16( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsne.mask.nxv4i16( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -472,9 +474,9 @@ entry: declare @llvm.riscv.vmsne.nxv8i16( , , - i64); + iXLen); -define @intrinsic_vmsne_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsne_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -484,7 +486,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv8i16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -494,9 +496,9 @@ declare @llvm.riscv.vmsne.mask.nxv8i16( , , , - i64); + iXLen); -define @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -510,13 +512,13 @@ entry: %mask = call @llvm.riscv.vmsne.nxv8i16( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsne.mask.nxv8i16( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -524,9 +526,9 @@ entry: declare @llvm.riscv.vmsne.nxv16i16( , , - i64); + iXLen); -define @intrinsic_vmsne_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsne_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -536,7 +538,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv16i16( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -546,9 +548,9 @@ declare @llvm.riscv.vmsne.mask.nxv16i16( , , , - i64); + iXLen); -define @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a0, e16, m4, ta, mu @@ -562,13 +564,13 @@ entry: %mask = call @llvm.riscv.vmsne.nxv16i16( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsne.mask.nxv16i16( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -576,9 +578,9 @@ entry: declare @llvm.riscv.vmsne.nxv1i32( , , - i64); + iXLen); -define @intrinsic_vmsne_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsne_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -588,7 +590,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv1i32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -598,9 +600,9 @@ declare @llvm.riscv.vmsne.mask.nxv1i32( , , , - i64); + iXLen); -define @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -614,13 +616,13 @@ entry: %mask = call @llvm.riscv.vmsne.nxv1i32( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsne.mask.nxv1i32( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -628,9 +630,9 @@ entry: declare @llvm.riscv.vmsne.nxv2i32( , , - i64); + iXLen); -define @intrinsic_vmsne_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsne_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -640,7 +642,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv2i32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -650,9 +652,9 @@ declare @llvm.riscv.vmsne.mask.nxv2i32( , , , - i64); + iXLen); -define @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -666,13 +668,13 @@ entry: %mask = call @llvm.riscv.vmsne.nxv2i32( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsne.mask.nxv2i32( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -680,9 +682,9 @@ entry: declare @llvm.riscv.vmsne.nxv4i32( , , - i64); + iXLen); -define @intrinsic_vmsne_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsne_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -692,7 +694,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv4i32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -702,9 +704,9 @@ declare @llvm.riscv.vmsne.mask.nxv4i32( , , , - i64); + iXLen); -define @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -718,13 +720,13 @@ entry: %mask = call @llvm.riscv.vmsne.nxv4i32( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsne.mask.nxv4i32( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -732,9 +734,9 @@ entry: declare @llvm.riscv.vmsne.nxv8i32( , , - i64); + iXLen); -define @intrinsic_vmsne_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsne_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -744,7 +746,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv8i32( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -754,9 +756,9 @@ declare @llvm.riscv.vmsne.mask.nxv8i32( , , , - i64); + iXLen); -define @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -770,13 +772,13 @@ entry: %mask = call @llvm.riscv.vmsne.nxv8i32( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsne.mask.nxv8i32( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -784,9 +786,9 @@ entry: declare @llvm.riscv.vmsne.nxv1i64( , , - i64); + iXLen); -define @intrinsic_vmsne_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsne_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -796,7 +798,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv1i64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vmsne.mask.nxv1i64( , , , - i64); + iXLen); -define @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -822,13 +824,13 @@ entry: %mask = call @llvm.riscv.vmsne.nxv1i64( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsne.mask.nxv1i64( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -836,9 +838,9 @@ entry: declare @llvm.riscv.vmsne.nxv2i64( , , - i64); + iXLen); -define @intrinsic_vmsne_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsne_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -848,7 +850,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv2i64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -858,9 +860,9 @@ declare @llvm.riscv.vmsne.mask.nxv2i64( , , , - i64); + iXLen); -define @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -874,13 +876,13 @@ entry: %mask = call @llvm.riscv.vmsne.nxv2i64( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsne.mask.nxv2i64( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -888,9 +890,9 @@ entry: declare @llvm.riscv.vmsne.nxv4i64( , , - i64); + iXLen); -define @intrinsic_vmsne_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vmsne_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -900,7 +902,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv4i64( %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -910,9 +912,9 @@ declare @llvm.riscv.vmsne.mask.nxv4i64( , , , - i64); + iXLen); -define @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +define 
@intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -926,13 +928,13 @@ entry: %mask = call @llvm.riscv.vmsne.nxv4i64( %1, %2, - i64 %4) + iXLen %4) %a = call @llvm.riscv.vmsne.mask.nxv4i64( %0, %2, %3, %mask, - i64 %4) + iXLen %4) ret %a } @@ -940,9 +942,9 @@ entry: declare @llvm.riscv.vmsne.nxv1i8.i8( , i8, - i64); + iXLen); -define @intrinsic_vmsne_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmsne_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -952,7 +954,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv1i8.i8( %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -962,9 +964,9 @@ declare @llvm.riscv.vmsne.mask.nxv1i8.i8( , i8, , - i64); + iXLen); -define @intrinsic_vmsne_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -979,7 +981,7 @@ entry: %1, i8 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -987,9 +989,9 @@ entry: declare @llvm.riscv.vmsne.nxv2i8.i8( , i8, - i64); + iXLen); -define @intrinsic_vmsne_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmsne_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -999,7 +1001,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv2i8.i8( %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1009,9 +1011,9 @@ declare @llvm.riscv.vmsne.mask.nxv2i8.i8( , i8, , - i64); + iXLen); -define @intrinsic_vmsne_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1026,7 +1028,7 @@ entry: %1, i8 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1034,9 +1036,9 @@ entry: declare @llvm.riscv.vmsne.nxv4i8.i8( , i8, - i64); + iXLen); -define @intrinsic_vmsne_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmsne_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1046,7 +1048,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv4i8.i8( %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1056,9 +1058,9 @@ declare @llvm.riscv.vmsne.mask.nxv4i8.i8( , i8, , - i64); + iXLen); -define @intrinsic_vmsne_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1073,7 +1075,7 @@ entry: %1, i8 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1081,9 +1083,9 @@ entry: declare @llvm.riscv.vmsne.nxv8i8.i8( , i8, - i64); + iXLen); -define @intrinsic_vmsne_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmsne_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -1093,7 +1095,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv8i8.i8( %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1103,9 
+1105,9 @@ declare @llvm.riscv.vmsne.mask.nxv8i8.i8( , i8, , - i64); + iXLen); -define @intrinsic_vmsne_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1120,7 +1122,7 @@ entry: %1, i8 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1128,9 +1130,9 @@ entry: declare @llvm.riscv.vmsne.nxv16i8.i8( , i8, - i64); + iXLen); -define @intrinsic_vmsne_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmsne_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1140,7 +1142,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv16i8.i8( %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1150,9 +1152,9 @@ declare @llvm.riscv.vmsne.mask.nxv16i8.i8( , i8, , - i64); + iXLen); -define @intrinsic_vmsne_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1167,7 +1169,7 @@ entry: %1, i8 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1175,9 +1177,9 @@ entry: declare @llvm.riscv.vmsne.nxv32i8.i8( , i8, - i64); + iXLen); -define @intrinsic_vmsne_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vmsne_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -1187,7 +1189,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv32i8.i8( %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1197,9 +1199,9 @@ declare @llvm.riscv.vmsne.mask.nxv32i8.i8( , i8, , - i64); + iXLen); -define @intrinsic_vmsne_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1214,7 +1216,7 @@ entry: %1, i8 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1222,9 +1224,9 @@ entry: declare @llvm.riscv.vmsne.nxv1i16.i16( , i16, - i64); + iXLen); -define @intrinsic_vmsne_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmsne_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -1234,7 +1236,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv1i16.i16( %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1244,9 +1246,9 @@ declare @llvm.riscv.vmsne.mask.nxv1i16.i16( , i16, , - i64); + iXLen); -define @intrinsic_vmsne_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1261,7 +1263,7 @@ entry: %1, i16 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1269,9 +1271,9 @@ entry: declare @llvm.riscv.vmsne.nxv2i16.i16( , i16, - i64); + iXLen); -define @intrinsic_vmsne_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmsne_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -1281,7 +1283,7 @@ entry: 
%a = call @llvm.riscv.vmsne.nxv2i16.i16( %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1291,9 +1293,9 @@ declare @llvm.riscv.vmsne.mask.nxv2i16.i16( , i16, , - i64); + iXLen); -define @intrinsic_vmsne_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1308,7 +1310,7 @@ entry: %1, i16 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1316,9 +1318,9 @@ entry: declare @llvm.riscv.vmsne.nxv4i16.i16( , i16, - i64); + iXLen); -define @intrinsic_vmsne_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmsne_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1328,7 +1330,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv4i16.i16( %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1338,9 +1340,9 @@ declare @llvm.riscv.vmsne.mask.nxv4i16.i16( , i16, , - i64); + iXLen); -define @intrinsic_vmsne_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1355,7 +1357,7 @@ entry: %1, i16 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1363,9 +1365,9 @@ entry: declare @llvm.riscv.vmsne.nxv8i16.i16( , i16, - i64); + iXLen); -define @intrinsic_vmsne_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmsne_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1375,7 +1377,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv8i16.i16( %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1385,9 +1387,9 @@ declare @llvm.riscv.vmsne.mask.nxv8i16.i16( , i16, , - i64); + iXLen); -define @intrinsic_vmsne_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1402,7 +1404,7 @@ entry: %1, i16 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1410,9 +1412,9 @@ entry: declare @llvm.riscv.vmsne.nxv16i16.i16( , i16, - i64); + iXLen); -define @intrinsic_vmsne_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vmsne_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -1422,7 +1424,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv16i16.i16( %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1432,9 +1434,9 @@ declare @llvm.riscv.vmsne.mask.nxv16i16.i16( , i16, , - i64); + iXLen); -define @intrinsic_vmsne_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1449,7 +1451,7 @@ entry: %1, i16 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1457,9 +1459,9 @@ entry: declare @llvm.riscv.vmsne.nxv1i32.i32( , i32, - i64); + iXLen); -define @intrinsic_vmsne_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmsne_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { 
; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1469,7 +1471,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv1i32.i32( %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1479,9 +1481,9 @@ declare @llvm.riscv.vmsne.mask.nxv1i32.i32( , i32, , - i64); + iXLen); -define @intrinsic_vmsne_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1496,7 +1498,7 @@ entry: %1, i32 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1504,9 +1506,9 @@ entry: declare @llvm.riscv.vmsne.nxv2i32.i32( , i32, - i64); + iXLen); -define @intrinsic_vmsne_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmsne_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1516,7 +1518,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv2i32.i32( %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1526,9 +1528,9 @@ declare @llvm.riscv.vmsne.mask.nxv2i32.i32( , i32, , - i64); + iXLen); -define @intrinsic_vmsne_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1543,7 +1545,7 @@ entry: %1, i32 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1551,9 +1553,9 @@ entry: declare @llvm.riscv.vmsne.nxv4i32.i32( , i32, - i64); + iXLen); -define @intrinsic_vmsne_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmsne_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1563,7 +1565,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv4i32.i32( %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1573,9 +1575,9 @@ declare @llvm.riscv.vmsne.mask.nxv4i32.i32( , i32, , - i64); + iXLen); -define @intrinsic_vmsne_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1590,7 +1592,7 @@ entry: %1, i32 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1598,9 +1600,9 @@ entry: declare @llvm.riscv.vmsne.nxv8i32.i32( , i32, - i64); + iXLen); -define @intrinsic_vmsne_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vmsne_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1610,7 +1612,7 @@ entry: %a = call @llvm.riscv.vmsne.nxv8i32.i32( %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1620,9 +1622,9 @@ declare @llvm.riscv.vmsne.mask.nxv8i32.i32( , i32, , - i64); + iXLen); -define @intrinsic_vmsne_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vmsne_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1637,7 +1639,7 @@ entry: %1, i32 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1645,19 +1647,31 @@ entry: declare @llvm.riscv.vmsne.nxv1i64.i64( , i64, - i64); - -define 
@intrinsic_vmsne_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vmsne.vx v0, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsne_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsne_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vmsne.vv v0, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsne_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsne.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsne.nxv1i64.i64( %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -1667,24 +1681,39 @@ declare @llvm.riscv.vmsne.mask.nxv1i64.i64( , i64, , - i64); - -define @intrinsic_vmsne_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv.v.v v0, v10 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsne_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v11, (a0), zero +; RV32-NEXT: vmv1r.v v10, v0 +; RV32-NEXT: vmv1r.v v0, v9 +; RV32-NEXT: vmsne.vv v10, v8, v11, v0.t +; RV32-NEXT: vmv.v.v v0, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v10, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vmv1r.v v0, v9 +; RV64-NEXT: vmsne.vx v10, v8, a0, v0.t +; RV64-NEXT: vmv.v.v v0, v10 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsne.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1692,19 +1721,31 @@ entry: declare @llvm.riscv.vmsne.nxv2i64.i64( , i64, - i64); - -define @intrinsic_vmsne_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vmsne.vx v0, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsne_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsne_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vmsne.vv v0, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsne_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmsne.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsne.nxv2i64.i64( %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -1714,24 +1755,39 @@ declare @llvm.riscv.vmsne.mask.nxv2i64.i64( , i64, , - i64); - -define @intrinsic_vmsne_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, 
i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmsne.vx v11, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsne_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmv1r.v v11, v0 +; RV32-NEXT: vmv1r.v v0, v10 +; RV32-NEXT: vmsne.vv v11, v8, v12, v0.t +; RV32-NEXT: vmv1r.v v0, v11 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v11, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vmv1r.v v0, v10 +; RV64-NEXT: vmsne.vx v11, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v11 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsne.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - i64 %4) + iXLen %4) ret %a } @@ -1739,19 +1795,31 @@ entry: declare @llvm.riscv.vmsne.nxv4i64.i64( , i64, - i64); - -define @intrinsic_vmsne_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vmsne.vx v0, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsne_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmsne_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vmsne.vv v0, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsne_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsne.vx v0, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsne.nxv4i64.i64( %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -1761,29 +1829,44 @@ declare @llvm.riscv.vmsne.mask.nxv4i64.i64( , i64, , - i64); - -define @intrinsic_vmsne_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v13, v0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmsne.vx v13, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vmsne_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vmv1r.v v13, v0 +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: vmsne.vv v13, v8, v16, v0.t +; RV32-NEXT: vmv1r.v v0, v13 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vmv1r.v v13, v0 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: vmsne.vx v13, v8, a0, v0.t +; 
RV64-NEXT: vmv1r.v v0, v13 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmsne.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i64 %4) + iXLen %4) ret %a } -define @intrinsic_vmsne_vi_nxv1i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vmsne_vi_nxv1i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -1793,12 +1876,12 @@ entry: %a = call @llvm.riscv.vmsne.nxv1i8.i8( %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsne_mask_vi_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsne_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1813,12 +1896,12 @@ entry: %1, i8 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsne_vi_nxv2i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vmsne_vi_nxv2i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -1828,12 +1911,12 @@ entry: %a = call @llvm.riscv.vmsne.nxv2i8.i8( %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsne_mask_vi_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsne_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1848,12 +1931,12 @@ entry: %1, i8 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsne_vi_nxv4i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vmsne_vi_nxv4i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -1863,12 +1946,12 @@ entry: %a = call @llvm.riscv.vmsne.nxv4i8.i8( %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsne_mask_vi_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsne_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1883,12 +1966,12 @@ entry: %1, i8 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsne_vi_nxv8i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vmsne_vi_nxv8i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -1898,12 +1981,12 @@ entry: %a = call @llvm.riscv.vmsne.nxv8i8.i8( %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsne_mask_vi_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsne_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -1918,12 +2001,12 @@ entry: %1, i8 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsne_vi_nxv16i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vmsne_vi_nxv16i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -1933,12 +2016,12 @@ entry: %a = call @llvm.riscv.vmsne.nxv16i8.i8( %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsne_mask_vi_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsne_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -1953,12 
+2036,12 @@ entry: %1, i8 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsne_vi_nxv32i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vmsne_vi_nxv32i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -1968,12 +2051,12 @@ entry: %a = call @llvm.riscv.vmsne.nxv32i8.i8( %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsne_mask_vi_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsne_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -1988,12 +2071,12 @@ entry: %1, i8 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsne_vi_nxv1i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vmsne_vi_nxv1i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -2003,12 +2086,12 @@ entry: %a = call @llvm.riscv.vmsne.nxv1i16.i16( %0, i16 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsne_mask_vi_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsne_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2023,12 +2106,12 @@ entry: %1, i16 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsne_vi_nxv2i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vmsne_vi_nxv2i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -2038,12 +2121,12 @@ entry: %a = call @llvm.riscv.vmsne.nxv2i16.i16( %0, i16 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsne_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsne_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2058,12 +2141,12 @@ entry: %1, i16 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsne_vi_nxv4i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vmsne_vi_nxv4i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -2073,12 +2156,12 @@ entry: %a = call @llvm.riscv.vmsne.nxv4i16.i16( %0, i16 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsne_mask_vi_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsne_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2093,12 +2176,12 @@ entry: %1, i16 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsne_vi_nxv8i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vmsne_vi_nxv8i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -2108,12 +2191,12 @@ entry: %a = call @llvm.riscv.vmsne.nxv8i16.i16( %0, i16 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsne_mask_vi_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsne_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2128,12 +2211,12 @@ entry: %1, 
i16 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsne_vi_nxv16i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vmsne_vi_nxv16i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -2143,12 +2226,12 @@ entry: %a = call @llvm.riscv.vmsne.nxv16i16.i16( %0, i16 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsne_mask_vi_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsne_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2163,12 +2246,12 @@ entry: %1, i16 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsne_vi_nxv1i32_i32( %0, i64 %1) nounwind { +define @intrinsic_vmsne_vi_nxv1i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -2178,12 +2261,12 @@ entry: %a = call @llvm.riscv.vmsne.nxv1i32.i32( %0, i32 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsne_mask_vi_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsne_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2198,12 +2281,12 @@ entry: %1, i32 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsne_vi_nxv2i32_i32( %0, i64 %1) nounwind { +define @intrinsic_vmsne_vi_nxv2i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -2213,12 +2296,12 @@ entry: %a = call @llvm.riscv.vmsne.nxv2i32.i32( %0, i32 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsne_mask_vi_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsne_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2233,12 +2316,12 @@ entry: %1, i32 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsne_vi_nxv4i32_i32( %0, i64 %1) nounwind { +define @intrinsic_vmsne_vi_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -2248,12 +2331,12 @@ entry: %a = call @llvm.riscv.vmsne.nxv4i32.i32( %0, i32 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsne_mask_vi_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsne_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2268,12 +2351,12 @@ entry: %1, i32 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsne_vi_nxv8i32_i32( %0, i64 %1) nounwind { +define @intrinsic_vmsne_vi_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -2283,12 +2366,12 @@ entry: %a = call @llvm.riscv.vmsne.nxv8i32.i32( %0, i32 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsne_mask_vi_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsne_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2303,12 +2386,12 @@ entry: %1, i32 9, 
%2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsne_vi_nxv1i64_i64( %0, i64 %1) nounwind { +define @intrinsic_vmsne_vi_nxv1i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -2318,12 +2401,12 @@ entry: %a = call @llvm.riscv.vmsne.nxv1i64.i64( %0, i64 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsne_mask_vi_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsne_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -2338,12 +2421,12 @@ entry: %1, i64 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsne_vi_nxv2i64_i64( %0, i64 %1) nounwind { +define @intrinsic_vmsne_vi_nxv2i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -2353,12 +2436,12 @@ entry: %a = call @llvm.riscv.vmsne.nxv2i64.i64( %0, i64 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsne_mask_vi_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsne_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v11, v0 @@ -2373,12 +2456,12 @@ entry: %1, i64 9, %2, - i64 %3) + iXLen %3) ret %a } -define @intrinsic_vmsne_vi_nxv4i64_i64( %0, i64 %1) nounwind { +define @intrinsic_vmsne_vi_nxv4i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -2388,12 +2471,12 @@ entry: %a = call @llvm.riscv.vmsne.nxv4i64.i64( %0, i64 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vmsne_mask_vi_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vmsne_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v13, v0 @@ -2408,7 +2491,7 @@ entry: %1, i64 9, %2, - i64 %3) + iXLen %3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll deleted file mode 100644 index ff86a87..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll +++ /dev/null @@ -1,1694 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vnmsac.nxv1i8.nxv1i8( - , - , - , - i64, - i64); - -define @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu -; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv2i8.nxv2i8( - , - , - , 
- i64, - i64); - -define @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu -; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv4i8.nxv4i8( - , - , - , - i64, - i64); - -define @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu -; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv8i8.nxv8i8( - , - , - , - i64, - i64); - -define @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu -; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv16i8.nxv16i8( - , - , - , - i64, - i64); - -define @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu -; CHECK-NEXT: 
vnmsac.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv32i8.nxv32i8( - , - , - , - i64, - i64); - -define @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu -; CHECK-NEXT: vnmsac.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv1i16.nxv1i16( - , - , - , - i64, - i64); - -define @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv2i16.nxv2i16( - , - , - , - i64, - i64); - -define @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv4i16.nxv4i16( - , - , - , - i64, - i64); - -define @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16( 
- , - , - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv8i16.nxv8i16( - , - , - , - i64, - i64); - -define @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vnmsac.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv16i16.nxv16i16( - , - , - , - i64, - i64); - -define @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vnmsac.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv1i32.nxv1i32( - , - , - , - i64, - i64); - -define @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv2i32.nxv2i32( - , - , - , - i64, - i64); - -define @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv4i32.nxv4i32( - , - , - , - i64, - i64); - -define @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vnmsac.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv8i32.nxv8i32( - , - , - , - i64, - i64); - -define @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vnmsac.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv1i64.nxv1i64( - , - , - , - i64, - i64); - -define @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - 
%a = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv2i64.nxv2i64( - , - , - , - i64, - i64); - -define @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vnmsac.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv4i64.nxv4i64( - , - , - , - i64, - i64); - -define @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vnmsac.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv1i8.i8( - , - i8, - , - i64, - i64); - -define @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma -; CHECK-NEXT: vnmsac.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv1i8.i8( - %0, - i8 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv1i8.i8( - , - i8, - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu -; CHECK-NEXT: vnmsac.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv1i8.i8( - %0, - i8 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv2i8.i8( - , - i8, - , - i64, - i64); - -define @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma -; CHECK-NEXT: vnmsac.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv2i8.i8( - %0, - i8 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv2i8.i8( - , - i8, - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind 
{ -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu -; CHECK-NEXT: vnmsac.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv2i8.i8( - %0, - i8 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv4i8.i8( - , - i8, - , - i64, - i64); - -define @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma -; CHECK-NEXT: vnmsac.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv4i8.i8( - %0, - i8 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv4i8.i8( - , - i8, - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu -; CHECK-NEXT: vnmsac.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv4i8.i8( - %0, - i8 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv8i8.i8( - , - i8, - , - i64, - i64); - -define @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma -; CHECK-NEXT: vnmsac.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv8i8.i8( - %0, - i8 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv8i8.i8( - , - i8, - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu -; CHECK-NEXT: vnmsac.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv8i8.i8( - %0, - i8 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv16i8.i8( - , - i8, - , - i64, - i64); - -define @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma -; CHECK-NEXT: vnmsac.vx v8, a0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv16i8.i8( - %0, - i8 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv16i8.i8( - , - i8, - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu -; CHECK-NEXT: vnmsac.vx v8, a0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv16i8.i8( - %0, - i8 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv32i8.i8( - , - i8, - , - i64, - i64); - -define @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma -; CHECK-NEXT: vnmsac.vx v8, a0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv32i8.i8( - %0, - i8 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare 
@llvm.riscv.vnmsac.mask.nxv32i8.i8( - , - i8, - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu -; CHECK-NEXT: vnmsac.vx v8, a0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv32i8.i8( - %0, - i8 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv1i16.i16( - , - i16, - , - i64, - i64); - -define @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma -; CHECK-NEXT: vnmsac.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv1i16.i16( - %0, - i16 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv1i16.i16( - , - i16, - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vnmsac.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv1i16.i16( - %0, - i16 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv2i16.i16( - , - i16, - , - i64, - i64); - -define @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma -; CHECK-NEXT: vnmsac.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv2i16.i16( - %0, - i16 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv2i16.i16( - , - i16, - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vnmsac.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv2i16.i16( - %0, - i16 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv4i16.i16( - , - i16, - , - i64, - i64); - -define @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma -; CHECK-NEXT: vnmsac.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv4i16.i16( - %0, - i16 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv4i16.i16( - , - i16, - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vnmsac.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv4i16.i16( - %0, - i16 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv8i16.i16( - , - i16, - , - i64, - i64); - -define @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma -; CHECK-NEXT: vnmsac.vx v8, a0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv8i16.i16( - %0, - i16 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv8i16.i16( - , - i16, - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vnmsac.vx v8, a0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv8i16.i16( - %0, - i16 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv16i16.i16( - , - i16, - , - i64, - i64); - -define @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma -; CHECK-NEXT: vnmsac.vx v8, a0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv16i16.i16( - %0, - i16 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv16i16.i16( - , - i16, - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vnmsac.vx v8, a0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv16i16.i16( - %0, - i16 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv1i32.i32( - , - i32, - , - i64, - i64); - -define @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma -; CHECK-NEXT: vnmsac.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv1i32.i32( - %0, - i32 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv1i32.i32( - , - i32, - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vnmsac.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv1i32.i32( - %0, - i32 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv2i32.i32( - , - i32, - , - i64, - i64); - -define @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma -; CHECK-NEXT: vnmsac.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv2i32.i32( - %0, - i32 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv2i32.i32( - , - i32, - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vnmsac.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv2i32.i32( - %0, - i32 %1, - %2, - %3, - i64 %4, i64 0) - - 
ret %a -} - -declare @llvm.riscv.vnmsac.nxv4i32.i32( - , - i32, - , - i64, - i64); - -define @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma -; CHECK-NEXT: vnmsac.vx v8, a0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv4i32.i32( - %0, - i32 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv4i32.i32( - , - i32, - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vnmsac.vx v8, a0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv4i32.i32( - %0, - i32 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv8i32.i32( - , - i32, - , - i64, - i64); - -define @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma -; CHECK-NEXT: vnmsac.vx v8, a0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv8i32.i32( - %0, - i32 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv8i32.i32( - , - i32, - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vnmsac.vx v8, a0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv8i32.i32( - %0, - i32 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv1i64.i64( - , - i64, - , - i64, - i64); - -define @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, ma -; CHECK-NEXT: vnmsac.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv1i64.i64( - %0, - i64 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv1i64.i64( - , - i64, - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vnmsac.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv1i64.i64( - %0, - i64 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv2i64.i64( - , - i64, - , - i64, - i64); - -define @intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma -; CHECK-NEXT: vnmsac.vx v8, a0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv2i64.i64( - %0, - i64 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv2i64.i64( - , - i64, - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64: -; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vnmsac.vx v8, a0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv2i64.i64( - %0, - i64 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.nxv4i64.i64( - , - i64, - , - i64, - i64); - -define @intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, ma -; CHECK-NEXT: vnmsac.vx v8, a0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.nxv4i64.i64( - %0, - i64 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsac.mask.nxv4i64.i64( - , - i64, - , - , - i64, i64); - -define @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vnmsac.vx v8, a0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsac.mask.nxv4i64.i64( - %0, - i64 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac.ll similarity index 80% rename from llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vnmsac.ll index 4a8fb8d..563d6c5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac.ll @@ -1,14 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vnmsac.nxv1i8.nxv1i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma @@ -19,7 +21,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -29,9 +31,9 @@ declare @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu @@ -43,7 +45,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -52,10 +54,10 @@ declare @llvm.riscv.vnmsac.nxv2i8.nxv2i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma @@ -66,7 +68,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -76,9 +78,9 @@ declare @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8( , , , - i32, i32); + iXLen, 
iXLen); -define @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu @@ -90,7 +92,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -99,10 +101,10 @@ declare @llvm.riscv.vnmsac.nxv4i8.nxv4i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma @@ -113,7 +115,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -123,9 +125,9 @@ declare @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu @@ -137,7 +139,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -146,10 +148,10 @@ declare @llvm.riscv.vnmsac.nxv8i8.nxv8i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma @@ -160,7 +162,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -170,9 +172,9 @@ declare @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu @@ -184,7 +186,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -193,10 +195,10 @@ declare @llvm.riscv.vnmsac.nxv16i8.nxv16i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma @@ -207,7 +209,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -217,9 +219,9 @@ declare @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu @@ -231,7 +233,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -240,10 +242,10 @@ declare @llvm.riscv.vnmsac.nxv32i8.nxv32i8( , , , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma @@ -254,7 +256,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -264,9 +266,9 @@ declare @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu @@ -278,7 +280,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -287,10 +289,10 @@ declare @llvm.riscv.vnmsac.nxv1i16.nxv1i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma @@ -301,7 +303,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -311,9 +313,9 @@ declare @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -325,7 +327,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -334,10 +336,10 @@ declare @llvm.riscv.vnmsac.nxv2i16.nxv2i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma @@ -348,7 +350,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -358,9 +360,9 @@ declare @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -372,7 +374,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -381,10 +383,10 @@ declare @llvm.riscv.vnmsac.nxv4i16.nxv4i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma @@ -395,7 +397,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -405,9 +407,9 @@ declare @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16( , , , - i32, i32); + iXLen, iXLen); 
-define @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -419,7 +421,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -428,10 +430,10 @@ declare @llvm.riscv.vnmsac.nxv8i16.nxv8i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma @@ -442,7 +444,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -452,9 +454,9 @@ declare @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -466,7 +468,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -475,10 +477,10 @@ declare @llvm.riscv.vnmsac.nxv16i16.nxv16i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma @@ -489,7 +491,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -499,9 +501,9 @@ declare @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -513,7 +515,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -522,10 +524,10 @@ declare @llvm.riscv.vnmsac.nxv1i32.nxv1i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma @@ -536,7 +538,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -546,9 +548,9 @@ declare @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -560,7 +562,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -569,10 +571,10 @@ declare 
@llvm.riscv.vnmsac.nxv2i32.nxv2i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma @@ -583,7 +585,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -593,9 +595,9 @@ declare @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -607,7 +609,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -616,10 +618,10 @@ declare @llvm.riscv.vnmsac.nxv4i32.nxv4i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma @@ -630,7 +632,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -640,9 +642,9 @@ declare @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -654,7 +656,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -663,10 +665,10 @@ declare @llvm.riscv.vnmsac.nxv8i32.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma @@ -677,7 +679,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -687,9 +689,9 @@ declare @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -701,7 +703,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -710,10 +712,10 @@ declare @llvm.riscv.vnmsac.nxv1i64.nxv1i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma @@ -724,7 +726,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -734,9 +736,9 @@ 
declare @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -748,7 +750,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -757,10 +759,10 @@ declare @llvm.riscv.vnmsac.nxv2i64.nxv2i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma @@ -771,7 +773,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -781,9 +783,9 @@ declare @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -795,7 +797,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -804,10 +806,10 @@ declare @llvm.riscv.vnmsac.nxv4i64.nxv4i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma @@ -818,7 +820,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -828,9 +830,9 @@ declare @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -842,7 +844,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -851,10 +853,10 @@ declare @llvm.riscv.vnmsac.nxv1i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma @@ -865,7 +867,7 @@ entry: %0, i8 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -875,9 +877,9 @@ declare @llvm.riscv.vnmsac.mask.nxv1i8.i8( i8, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu @@ -889,7 +891,7 @@ entry: i8 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -898,10 +900,10 @@ 
declare @llvm.riscv.vnmsac.nxv2i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma @@ -912,7 +914,7 @@ entry: %0, i8 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -922,9 +924,9 @@ declare @llvm.riscv.vnmsac.mask.nxv2i8.i8( i8, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu @@ -936,7 +938,7 @@ entry: i8 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -945,10 +947,10 @@ declare @llvm.riscv.vnmsac.nxv4i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma @@ -959,7 +961,7 @@ entry: %0, i8 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -969,9 +971,9 @@ declare @llvm.riscv.vnmsac.mask.nxv4i8.i8( i8, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu @@ -983,7 +985,7 @@ entry: i8 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -992,10 +994,10 @@ declare @llvm.riscv.vnmsac.nxv8i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma @@ -1006,7 +1008,7 @@ entry: %0, i8 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1016,9 +1018,9 @@ declare @llvm.riscv.vnmsac.mask.nxv8i8.i8( i8, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu @@ -1030,7 +1032,7 @@ entry: i8 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1039,10 +1041,10 @@ declare @llvm.riscv.vnmsac.nxv16i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma @@ -1053,7 +1055,7 @@ entry: %0, i8 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1063,9 +1065,9 @@ declare @llvm.riscv.vnmsac.mask.nxv16i8.i8( i8, , , - i32, i32); + iXLen, iXLen); -define 
@intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu @@ -1077,7 +1079,7 @@ entry: i8 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1086,10 +1088,10 @@ declare @llvm.riscv.vnmsac.nxv32i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma @@ -1100,7 +1102,7 @@ entry: %0, i8 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1110,9 +1112,9 @@ declare @llvm.riscv.vnmsac.mask.nxv32i8.i8( i8, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu @@ -1124,7 +1126,7 @@ entry: i8 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1133,10 +1135,10 @@ declare @llvm.riscv.vnmsac.nxv1i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma @@ -1147,7 +1149,7 @@ entry: %0, i16 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1157,9 +1159,9 @@ declare @llvm.riscv.vnmsac.mask.nxv1i16.i16( i16, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu @@ -1171,7 +1173,7 @@ entry: i16 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1180,10 +1182,10 @@ declare @llvm.riscv.vnmsac.nxv2i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma @@ -1194,7 +1196,7 @@ entry: %0, i16 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1204,9 +1206,9 @@ declare @llvm.riscv.vnmsac.mask.nxv2i16.i16( i16, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu @@ -1218,7 +1220,7 @@ entry: i16 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1227,10 +1229,10 @@ declare @llvm.riscv.vnmsac.nxv4i16.i16( , i16, , - i32, - i32); 
+ iXLen, + iXLen); -define @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma @@ -1241,7 +1243,7 @@ entry: %0, i16 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1251,9 +1253,9 @@ declare @llvm.riscv.vnmsac.mask.nxv4i16.i16( i16, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu @@ -1265,7 +1267,7 @@ entry: i16 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1274,10 +1276,10 @@ declare @llvm.riscv.vnmsac.nxv8i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma @@ -1288,7 +1290,7 @@ entry: %0, i16 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1298,9 +1300,9 @@ declare @llvm.riscv.vnmsac.mask.nxv8i16.i16( i16, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu @@ -1312,7 +1314,7 @@ entry: i16 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1321,10 +1323,10 @@ declare @llvm.riscv.vnmsac.nxv16i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma @@ -1335,7 +1337,7 @@ entry: %0, i16 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1345,9 +1347,9 @@ declare @llvm.riscv.vnmsac.mask.nxv16i16.i16( i16, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu @@ -1359,7 +1361,7 @@ entry: i16 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1368,10 +1370,10 @@ declare @llvm.riscv.vnmsac.nxv1i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma @@ -1382,7 +1384,7 @@ entry: %0, i32 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1392,9 +1394,9 @@ declare 
@llvm.riscv.vnmsac.mask.nxv1i32.i32( i32, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu @@ -1406,7 +1408,7 @@ entry: i32 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1415,10 +1417,10 @@ declare @llvm.riscv.vnmsac.nxv2i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma @@ -1429,7 +1431,7 @@ entry: %0, i32 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1439,9 +1441,9 @@ declare @llvm.riscv.vnmsac.mask.nxv2i32.i32( i32, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu @@ -1453,7 +1455,7 @@ entry: i32 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1462,10 +1464,10 @@ declare @llvm.riscv.vnmsac.nxv4i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma @@ -1476,7 +1478,7 @@ entry: %0, i32 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1486,9 +1488,9 @@ declare @llvm.riscv.vnmsac.mask.nxv4i32.i32( i32, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu @@ -1500,7 +1502,7 @@ entry: i32 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1509,10 +1511,10 @@ declare @llvm.riscv.vnmsac.nxv8i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma @@ -1523,7 +1525,7 @@ entry: %0, i32 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1533,9 +1535,9 @@ declare @llvm.riscv.vnmsac.mask.nxv8i32.i32( i32, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu @@ -1547,7 +1549,7 @@ entry: i32 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, 
iXLen 0) ret %a } @@ -1556,28 +1558,34 @@ declare @llvm.riscv.vnmsac.nxv1i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v10, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma +; RV32-NEXT: vnmsac.vv v8, v10, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma +; RV64-NEXT: vnmsac.vx v8, a0, v9 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vnmsac.nxv1i64.i64( %0, i64 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1587,28 +1595,34 @@ declare @llvm.riscv.vnmsac.mask.nxv1i64.i64( i64, , , - i32, i32); - -define @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu -; CHECK-NEXT: vnmsac.vv v8, v10, v9, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, iXLen); + +define @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vnmsac.vv v8, v10, v9, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vnmsac.vx v8, a0, v9, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vnmsac.mask.nxv1i64.i64( %0, i64 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1617,28 +1631,34 @@ declare @llvm.riscv.vnmsac.nxv2i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v12, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + 
iXLen, + iXLen); + +define @intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, ma +; RV32-NEXT: vnmsac.vv v8, v12, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma +; RV64-NEXT: vnmsac.vx v8, a0, v10 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vnmsac.nxv2i64.i64( %0, i64 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1648,28 +1668,34 @@ declare @llvm.riscv.vnmsac.mask.nxv2i64.i64( i64, , , - i32, i32); - -define @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu -; CHECK-NEXT: vnmsac.vv v8, v12, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, iXLen); + +define @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; RV32-NEXT: vnmsac.vv v8, v12, v10, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: vnmsac.vx v8, a0, v10, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vnmsac.mask.nxv2i64.i64( %0, i64 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1678,28 +1704,34 @@ declare @llvm.riscv.vnmsac.nxv4i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, ma -; CHECK-NEXT: vnmsac.vv v8, v16, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, ma +; RV32-NEXT: vnmsac.vv v8, v16, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64: +; RV64: # %bb.0: # %entry +; 
RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma +; RV64-NEXT: vnmsac.vx v8, a0, v12 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vnmsac.nxv4i64.i64( %0, i64 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1709,28 +1741,34 @@ declare @llvm.riscv.vnmsac.mask.nxv4i64.i64( i64, , , - i32, i32); - -define @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu -; CHECK-NEXT: vnmsac.vv v8, v16, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, iXLen); + +define @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; RV32-NEXT: vnmsac.vv v8, v16, v12, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; RV64-NEXT: vnmsac.vx v8, a0, v12, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vnmsac.mask.nxv4i64.i64( %0, i64 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll deleted file mode 100644 index da77b80..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll +++ /dev/null @@ -1,1694 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vnmsub.nxv1i8.nxv1i8( - , - , - , - i64, - i64); - -define @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma -; CHECK-NEXT: vnmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv2i8.nxv2i8( - , - , - , - i64, - i64); - -define @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma -; CHECK-NEXT: vnmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8( - , - , 
- , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv4i8.nxv4i8( - , - , - , - i64, - i64); - -define @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma -; CHECK-NEXT: vnmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv8i8.nxv8i8( - , - , - , - i64, - i64); - -define @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma -; CHECK-NEXT: vnmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv16i8.nxv16i8( - , - , - , - i64, - i64); - -define @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma -; CHECK-NEXT: vnmsub.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv32i8.nxv32i8( - , - , - , - i64, - i64); - -define @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, 
tu, ma -; CHECK-NEXT: vnmsub.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv1i16.nxv1i16( - , - , - , - i64, - i64); - -define @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma -; CHECK-NEXT: vnmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv2i16.nxv2i16( - , - , - , - i64, - i64); - -define @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma -; CHECK-NEXT: vnmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv4i16.nxv4i16( - , - , - , - i64, - i64); - -define @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma -; CHECK-NEXT: vnmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - 
-declare @llvm.riscv.vnmsub.nxv8i16.nxv8i16( - , - , - , - i64, - i64); - -define @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma -; CHECK-NEXT: vnmsub.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv16i16.nxv16i16( - , - , - , - i64, - i64); - -define @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma -; CHECK-NEXT: vnmsub.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv1i32.nxv1i32( - , - , - , - i64, - i64); - -define @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma -; CHECK-NEXT: vnmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv2i32.nxv2i32( - , - , - , - i64, - i64); - -define @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma -; CHECK-NEXT: vnmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv4i32.nxv4i32( - , - , - , - i64, - i64); - -define @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma -; CHECK-NEXT: vnmsub.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv8i32.nxv8i32( - , - , - , - i64, - i64); - -define @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma -; CHECK-NEXT: vnmsub.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv1i64.nxv1i64( - , - , - , - i64, - i64); - -define @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma -; CHECK-NEXT: vnmsub.vv v8, v9, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv2i64.nxv2i64( - , - , - , - i64, - i64); - -define @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma -; CHECK-NEXT: vnmsub.vv v8, v10, v12 -; CHECK-NEXT: ret -entry: - %a = 
call @llvm.riscv.vnmsub.nxv2i64.nxv2i64( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv4i64.nxv4i64( - , - , - , - i64, - i64); - -define @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma -; CHECK-NEXT: vnmsub.vv v8, v12, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64( - %0, - %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64( - , - , - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv1i8.i8( - , - i8, - , - i64, - i64); - -define @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma -; CHECK-NEXT: vnmsub.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv1i8.i8( - %0, - i8 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv1i8.i8( - , - i8, - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu -; CHECK-NEXT: vnmsub.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv1i8.i8( - %0, - i8 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv2i8.i8( - , - i8, - , - i64, - i64); - -define @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma -; CHECK-NEXT: vnmsub.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv2i8.i8( - %0, - i8 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv2i8.i8( - , - i8, - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu -; CHECK-NEXT: vnmsub.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv2i8.i8( - %0, - i8 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv4i8.i8( - , - i8, - , - i64, - i64); - -define @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { -; 
CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma -; CHECK-NEXT: vnmsub.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv4i8.i8( - %0, - i8 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv4i8.i8( - , - i8, - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu -; CHECK-NEXT: vnmsub.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv4i8.i8( - %0, - i8 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv8i8.i8( - , - i8, - , - i64, - i64); - -define @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma -; CHECK-NEXT: vnmsub.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv8i8.i8( - %0, - i8 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv8i8.i8( - , - i8, - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu -; CHECK-NEXT: vnmsub.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv8i8.i8( - %0, - i8 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv16i8.i8( - , - i8, - , - i64, - i64); - -define @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma -; CHECK-NEXT: vnmsub.vx v8, a0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv16i8.i8( - %0, - i8 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv16i8.i8( - , - i8, - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu -; CHECK-NEXT: vnmsub.vx v8, a0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv16i8.i8( - %0, - i8 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv32i8.i8( - , - i8, - , - i64, - i64); - -define @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma -; CHECK-NEXT: vnmsub.vx v8, a0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv32i8.i8( - %0, - i8 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv32i8.i8( - , - i8, - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu -; CHECK-NEXT: vnmsub.vx v8, a0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv32i8.i8( - %0, - i8 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - 
-declare @llvm.riscv.vnmsub.nxv1i16.i16( - , - i16, - , - i64, - i64); - -define @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma -; CHECK-NEXT: vnmsub.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv1i16.i16( - %0, - i16 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv1i16.i16( - , - i16, - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vnmsub.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv1i16.i16( - %0, - i16 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv2i16.i16( - , - i16, - , - i64, - i64); - -define @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma -; CHECK-NEXT: vnmsub.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv2i16.i16( - %0, - i16 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv2i16.i16( - , - i16, - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vnmsub.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv2i16.i16( - %0, - i16 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv4i16.i16( - , - i16, - , - i64, - i64); - -define @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma -; CHECK-NEXT: vnmsub.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv4i16.i16( - %0, - i16 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv4i16.i16( - , - i16, - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vnmsub.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv4i16.i16( - %0, - i16 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv8i16.i16( - , - i16, - , - i64, - i64); - -define @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma -; CHECK-NEXT: vnmsub.vx v8, a0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv8i16.i16( - %0, - i16 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv8i16.i16( - , - i16, - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vnmsub.vx v8, a0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv8i16.i16( - %0, - i16 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv16i16.i16( - , - i16, - , - i64, - i64); - -define @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma -; CHECK-NEXT: vnmsub.vx v8, a0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv16i16.i16( - %0, - i16 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv16i16.i16( - , - i16, - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vnmsub.vx v8, a0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv16i16.i16( - %0, - i16 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv1i32.i32( - , - i32, - , - i64, - i64); - -define @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma -; CHECK-NEXT: vnmsub.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv1i32.i32( - %0, - i32 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv1i32.i32( - , - i32, - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vnmsub.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv1i32.i32( - %0, - i32 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv2i32.i32( - , - i32, - , - i64, - i64); - -define @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma -; CHECK-NEXT: vnmsub.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv2i32.i32( - %0, - i32 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv2i32.i32( - , - i32, - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vnmsub.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv2i32.i32( - %0, - i32 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv4i32.i32( - , - i32, - , - i64, - i64); - -define @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma -; CHECK-NEXT: vnmsub.vx v8, a0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv4i32.i32( - %0, - i32 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare 
@llvm.riscv.vnmsub.mask.nxv4i32.i32( - , - i32, - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vnmsub.vx v8, a0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv4i32.i32( - %0, - i32 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv8i32.i32( - , - i32, - , - i64, - i64); - -define @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma -; CHECK-NEXT: vnmsub.vx v8, a0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv8i32.i32( - %0, - i32 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv8i32.i32( - , - i32, - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vnmsub.vx v8, a0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv8i32.i32( - %0, - i32 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv1i64.i64( - , - i64, - , - i64, - i64); - -define @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, ma -; CHECK-NEXT: vnmsub.vx v8, a0, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv1i64.i64( - %0, - i64 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv1i64.i64( - , - i64, - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vnmsub.vx v8, a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv1i64.i64( - %0, - i64 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv2i64.i64( - , - i64, - , - i64, - i64); - -define @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma -; CHECK-NEXT: vnmsub.vx v8, a0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv2i64.i64( - %0, - i64 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv2i64.i64( - , - i64, - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vnmsub.vx v8, a0, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv2i64.i64( - %0, - i64 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.nxv4i64.i64( - , - i64, - , - i64, - i64); - -define @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, ma -; CHECK-NEXT: vnmsub.vx v8, a0, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.nxv4i64.i64( - %0, - i64 %1, - %2, - i64 %3, i64 0) - - ret %a -} - -declare @llvm.riscv.vnmsub.mask.nxv4i64.i64( - , - i64, - , - , - i64, i64); - -define @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vnmsub.vx v8, a0, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vnmsub.mask.nxv4i64.i64( - %0, - i64 %1, - %2, - %3, - i64 %4, i64 0) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub.ll similarity index 80% rename from llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vnmsub.ll index c24c63f..4007702 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub.ll @@ -1,14 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vnmsub.nxv1i8.nxv1i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma @@ -19,7 +21,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -29,9 +31,9 @@ declare @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu @@ -43,7 +45,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -52,10 +54,10 @@ declare @llvm.riscv.vnmsub.nxv2i8.nxv2i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma @@ -66,7 +68,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -76,9 +78,9 @@ declare @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu @@ -90,7 +92,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -99,10 +101,10 @@ declare @llvm.riscv.vnmsub.nxv4i8.nxv4i8( , 
, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma @@ -113,7 +115,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -123,9 +125,9 @@ declare @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu @@ -137,7 +139,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -146,10 +148,10 @@ declare @llvm.riscv.vnmsub.nxv8i8.nxv8i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma @@ -160,7 +162,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -170,9 +172,9 @@ declare @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu @@ -184,7 +186,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -193,10 +195,10 @@ declare @llvm.riscv.vnmsub.nxv16i8.nxv16i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma @@ -207,7 +209,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -217,9 +219,9 @@ declare @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu @@ -231,7 +233,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -240,10 +242,10 @@ declare @llvm.riscv.vnmsub.nxv32i8.nxv32i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma @@ -254,7 +256,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -264,9 +266,9 @@ declare @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8( , , , - i32, i32); + iXLen, iXLen); 
-define @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu @@ -278,7 +280,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -287,10 +289,10 @@ declare @llvm.riscv.vnmsub.nxv1i16.nxv1i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma @@ -301,7 +303,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -311,9 +313,9 @@ declare @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -325,7 +327,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -334,10 +336,10 @@ declare @llvm.riscv.vnmsub.nxv2i16.nxv2i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma @@ -348,7 +350,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -358,9 +360,9 @@ declare @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -372,7 +374,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -381,10 +383,10 @@ declare @llvm.riscv.vnmsub.nxv4i16.nxv4i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma @@ -395,7 +397,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -405,9 +407,9 @@ declare @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -419,7 +421,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -428,10 +430,10 @@ declare @llvm.riscv.vnmsub.nxv8i16.nxv8i16( , , , - 
i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma @@ -442,7 +444,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -452,9 +454,9 @@ declare @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -466,7 +468,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -475,10 +477,10 @@ declare @llvm.riscv.vnmsub.nxv16i16.nxv16i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma @@ -489,7 +491,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -499,9 +501,9 @@ declare @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -513,7 +515,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -522,10 +524,10 @@ declare @llvm.riscv.vnmsub.nxv1i32.nxv1i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma @@ -536,7 +538,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -546,9 +548,9 @@ declare @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -560,7 +562,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -569,10 +571,10 @@ declare @llvm.riscv.vnmsub.nxv2i32.nxv2i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma @@ -583,7 +585,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -593,9 +595,9 @@ declare 
@llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -607,7 +609,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -616,10 +618,10 @@ declare @llvm.riscv.vnmsub.nxv4i32.nxv4i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma @@ -630,7 +632,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -640,9 +642,9 @@ declare @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -654,7 +656,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -663,10 +665,10 @@ declare @llvm.riscv.vnmsub.nxv8i32.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma @@ -677,7 +679,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -687,9 +689,9 @@ declare @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -701,7 +703,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -710,10 +712,10 @@ declare @llvm.riscv.vnmsub.nxv1i64.nxv1i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma @@ -724,7 +726,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -734,9 +736,9 @@ declare @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -748,7 +750,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } 
@@ -757,10 +759,10 @@ declare @llvm.riscv.vnmsub.nxv2i64.nxv2i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma @@ -771,7 +773,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -781,9 +783,9 @@ declare @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -795,7 +797,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -804,10 +806,10 @@ declare @llvm.riscv.vnmsub.nxv4i64.nxv4i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma @@ -818,7 +820,7 @@ entry: %0, %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -828,9 +830,9 @@ declare @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64( , , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -842,7 +844,7 @@ entry: %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -851,10 +853,10 @@ declare @llvm.riscv.vnmsub.nxv1i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma @@ -865,7 +867,7 @@ entry: %0, i8 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -875,9 +877,9 @@ declare @llvm.riscv.vnmsub.mask.nxv1i8.i8( i8, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu @@ -889,7 +891,7 @@ entry: i8 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -898,10 +900,10 @@ declare @llvm.riscv.vnmsub.nxv2i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma @@ -912,7 +914,7 @@ entry: %0, i8 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -922,9 +924,9 @@ declare 
@llvm.riscv.vnmsub.mask.nxv2i8.i8( i8, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu @@ -936,7 +938,7 @@ entry: i8 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -945,10 +947,10 @@ declare @llvm.riscv.vnmsub.nxv4i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma @@ -959,7 +961,7 @@ entry: %0, i8 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -969,9 +971,9 @@ declare @llvm.riscv.vnmsub.mask.nxv4i8.i8( i8, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu @@ -983,7 +985,7 @@ entry: i8 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -992,10 +994,10 @@ declare @llvm.riscv.vnmsub.nxv8i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma @@ -1006,7 +1008,7 @@ entry: %0, i8 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1016,9 +1018,9 @@ declare @llvm.riscv.vnmsub.mask.nxv8i8.i8( i8, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu @@ -1030,7 +1032,7 @@ entry: i8 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1039,10 +1041,10 @@ declare @llvm.riscv.vnmsub.nxv16i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma @@ -1053,7 +1055,7 @@ entry: %0, i8 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1063,9 +1065,9 @@ declare @llvm.riscv.vnmsub.mask.nxv16i8.i8( i8, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu @@ -1077,7 +1079,7 @@ entry: i8 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1086,10 +1088,10 @@ declare @llvm.riscv.vnmsub.nxv32i8.i8( , i8, , - i32, - i32); + 
iXLen, + iXLen); -define @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma @@ -1100,7 +1102,7 @@ entry: %0, i8 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1110,9 +1112,9 @@ declare @llvm.riscv.vnmsub.mask.nxv32i8.i8( i8, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu @@ -1124,7 +1126,7 @@ entry: i8 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1133,10 +1135,10 @@ declare @llvm.riscv.vnmsub.nxv1i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma @@ -1147,7 +1149,7 @@ entry: %0, i16 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1157,9 +1159,9 @@ declare @llvm.riscv.vnmsub.mask.nxv1i16.i16( i16, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu @@ -1171,7 +1173,7 @@ entry: i16 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1180,10 +1182,10 @@ declare @llvm.riscv.vnmsub.nxv2i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma @@ -1194,7 +1196,7 @@ entry: %0, i16 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1204,9 +1206,9 @@ declare @llvm.riscv.vnmsub.mask.nxv2i16.i16( i16, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu @@ -1218,7 +1220,7 @@ entry: i16 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1227,10 +1229,10 @@ declare @llvm.riscv.vnmsub.nxv4i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma @@ -1241,7 +1243,7 @@ entry: %0, i16 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1251,9 +1253,9 @@ declare @llvm.riscv.vnmsub.mask.nxv4i16.i16( i16, , , 
- i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu @@ -1265,7 +1267,7 @@ entry: i16 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1274,10 +1276,10 @@ declare @llvm.riscv.vnmsub.nxv8i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma @@ -1288,7 +1290,7 @@ entry: %0, i16 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1298,9 +1300,9 @@ declare @llvm.riscv.vnmsub.mask.nxv8i16.i16( i16, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu @@ -1312,7 +1314,7 @@ entry: i16 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1321,10 +1323,10 @@ declare @llvm.riscv.vnmsub.nxv16i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma @@ -1335,7 +1337,7 @@ entry: %0, i16 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1345,9 +1347,9 @@ declare @llvm.riscv.vnmsub.mask.nxv16i16.i16( i16, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu @@ -1359,7 +1361,7 @@ entry: i16 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1368,10 +1370,10 @@ declare @llvm.riscv.vnmsub.nxv1i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma @@ -1382,7 +1384,7 @@ entry: %0, i32 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1392,9 +1394,9 @@ declare @llvm.riscv.vnmsub.mask.nxv1i32.i32( i32, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu @@ -1406,7 +1408,7 @@ entry: i32 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1415,10 
+1417,10 @@ declare @llvm.riscv.vnmsub.nxv2i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma @@ -1429,7 +1431,7 @@ entry: %0, i32 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1439,9 +1441,9 @@ declare @llvm.riscv.vnmsub.mask.nxv2i32.i32( i32, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu @@ -1453,7 +1455,7 @@ entry: i32 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1462,10 +1464,10 @@ declare @llvm.riscv.vnmsub.nxv4i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma @@ -1476,7 +1478,7 @@ entry: %0, i32 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1486,9 +1488,9 @@ declare @llvm.riscv.vnmsub.mask.nxv4i32.i32( i32, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu @@ -1500,7 +1502,7 @@ entry: i32 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1509,10 +1511,10 @@ declare @llvm.riscv.vnmsub.nxv8i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma @@ -1523,7 +1525,7 @@ entry: %0, i32 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1533,9 +1535,9 @@ declare @llvm.riscv.vnmsub.mask.nxv8i32.i32( i32, , , - i32, i32); + iXLen, iXLen); -define @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu @@ -1547,7 +1549,7 @@ entry: i32 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1556,28 +1558,34 @@ declare @llvm.riscv.vnmsub.nxv1i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: 
vsetvli zero, zero, e64, m1, tu, ma -; CHECK-NEXT: vnmsub.vv v8, v10, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma +; RV32-NEXT: vnmsub.vv v8, v10, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma +; RV64-NEXT: vnmsub.vx v8, a0, v9 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vnmsub.nxv1i64.i64( %0, i64 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1587,28 +1595,34 @@ declare @llvm.riscv.vnmsub.mask.nxv1i64.i64( i64, , , - i32, i32); - -define @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v10, v9, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, iXLen); + +define @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vnmsub.vv v8, v10, v9, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vnmsub.vx v8, a0, v9, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vnmsub.mask.nxv1i64.i64( %0, i64 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1617,28 +1631,34 @@ declare @llvm.riscv.vnmsub.nxv2i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, ma -; CHECK-NEXT: vnmsub.vv v8, v12, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, ma +; RV32-NEXT: vnmsub.vv v8, v12, v10 +; RV32-NEXT: addi sp, 
sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma +; RV64-NEXT: vnmsub.vx v8, a0, v10 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vnmsub.nxv2i64.i64( %0, i64 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1648,28 +1668,34 @@ declare @llvm.riscv.vnmsub.mask.nxv2i64.i64( i64, , , - i32, i32); - -define @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v12, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, iXLen); + +define @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; RV32-NEXT: vnmsub.vv v8, v12, v10, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: vnmsub.vx v8, a0, v10, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vnmsub.mask.nxv2i64.i64( %0, i64 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } @@ -1678,28 +1704,34 @@ declare @llvm.riscv.vnmsub.nxv4i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, ma -; CHECK-NEXT: vnmsub.vv v8, v16, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, ma +; RV32-NEXT: vnmsub.vv v8, v16, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma +; RV64-NEXT: vnmsub.vx v8, a0, v12 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vnmsub.nxv4i64.i64( %0, i64 %1, %2, - i32 %3, i32 0) + iXLen %3, iXLen 0) ret %a } @@ -1709,28 +1741,34 @@ declare @llvm.riscv.vnmsub.mask.nxv4i64.i64( i64, , , - i32, i32); - -define @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi 
sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu -; CHECK-NEXT: vnmsub.vv v8, v16, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, iXLen); + +define @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; RV32-NEXT: vnmsub.vv v8, v16, v12, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; RV64-NEXT: vnmsub.vx v8, a0, v12, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vnmsub.mask.nxv4i64.i64( %0, i64 %1, %2, %3, - i32 %4, i32 0) + iXLen %4, iXLen 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll deleted file mode 100644 index 14dddc9..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll +++ /dev/null @@ -1,2848 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vor.nxv1i8.nxv1i8( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv1i8.nxv1i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv1i8.nxv1i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vor.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv2i8.nxv2i8( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv2i8.nxv2i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv2i8.nxv2i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vor.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv4i8.nxv4i8( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv4i8.nxv4i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv4i8.nxv4i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vor.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv8i8.nxv8i8( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv8i8.nxv8i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv8i8.nxv8i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vor.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv16i8.nxv16i8( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv16i8.nxv16i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv16i8.nxv16i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vor.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv32i8.nxv32i8( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv32i8.nxv32i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv32i8.nxv32i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vor.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv64i8.nxv64i8( - , - , - , - i32); - -define 
@intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv64i8.nxv64i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv64i8.nxv64i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vor.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv1i16.nxv1i16( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv1i16.nxv1i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv1i16.nxv1i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vor.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv2i16.nxv2i16( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv2i16.nxv2i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv2i16.nxv2i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vor.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv4i16.nxv4i16( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv4i16.nxv4i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv4i16.nxv4i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vor.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vor.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv8i16.nxv8i16( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv8i16.nxv8i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv8i16.nxv8i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vor.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv16i16.nxv16i16( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv16i16.nxv16i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv16i16.nxv16i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vor.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv32i16.nxv32i16( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv32i16.nxv32i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv32i16.nxv32i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vor.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv1i32.nxv1i32( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv1i32.nxv1i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv1i32.nxv1i32( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vor.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv2i32.nxv2i32( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv2i32.nxv2i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv2i32.nxv2i32( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vor.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv4i32.nxv4i32( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv4i32.nxv4i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv4i32.nxv4i32( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vor.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv8i32.nxv8i32( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv8i32.nxv8i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv8i32.nxv8i32( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vor.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv16i32.nxv16i32( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv16i32.nxv16i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv16i32.nxv16i32( - , - , - , - , - 
i32, - i32); - -define @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vor.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv1i64.nxv1i64( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv1i64.nxv1i64( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv1i64.nxv1i64( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vor.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv2i64.nxv2i64( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv2i64.nxv2i64( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv2i64.nxv2i64( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vor.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv4i64.nxv4i64( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv4i64.nxv4i64( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv4i64.nxv4i64( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vor.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv8i64.nxv8i64( - , - , - , - i32); - -define @intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vor.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = 
call @llvm.riscv.vor.nxv8i64.nxv8i64( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv8i64.nxv8i64( - , - , - , - , - i32, - i32); - -define @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vor.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv1i8.i8( - , - , - i8, - i32); - -define @intrinsic_vor_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv1i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv1i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vor.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv2i8.i8( - , - , - i8, - i32); - -define @intrinsic_vor_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv2i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv2i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vor.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv4i8.i8( - , - , - i8, - i32); - -define @intrinsic_vor_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv4i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv4i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vor.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv8i8.i8( - , - , - i8, - i32); - -define @intrinsic_vor_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret 
-entry: - %a = call @llvm.riscv.vor.nxv8i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv8i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vor.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv16i8.i8( - , - , - i8, - i32); - -define @intrinsic_vor_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv16i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv16i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vor.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv32i8.i8( - , - , - i8, - i32); - -define @intrinsic_vor_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv32i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv32i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vor.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv64i8.i8( - , - , - i8, - i32); - -define @intrinsic_vor_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv64i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv64i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vor.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv64i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv1i16.i16( - , - , - i16, - i32); - -define @intrinsic_vor_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret 
-entry: - %a = call @llvm.riscv.vor.nxv1i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv1i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vor.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv2i16.i16( - , - , - i16, - i32); - -define @intrinsic_vor_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv2i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv2i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vor.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv4i16.i16( - , - , - i16, - i32); - -define @intrinsic_vor_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv4i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv4i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vor.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv8i16.i16( - , - , - i16, - i32); - -define @intrinsic_vor_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv8i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv8i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vor.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv16i16.i16( - , - , - i16, - i32); - -define @intrinsic_vor_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e16, m4, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv16i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv16i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vor.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv32i16.i16( - , - , - i16, - i32); - -define @intrinsic_vor_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv32i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv32i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vor.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv32i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv1i32.i32( - , - , - i32, - i32); - -define @intrinsic_vor_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv1i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv1i32.i32( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vor.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv2i32.i32( - , - , - i32, - i32); - -define @intrinsic_vor_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv2i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv2i32.i32( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vor.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv4i32.i32( - , - , - i32, - i32); - -define @intrinsic_vor_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vor_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv4i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv4i32.i32( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vor.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv8i32.i32( - , - , - i32, - i32); - -define @intrinsic_vor_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv8i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv8i32.i32( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vor.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv16i32.i32( - , - , - i32, - i32); - -define @intrinsic_vor_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv16i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv16i32.i32( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vor.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv16i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv1i64.i64( - , - , - i64, - i32); - -define @intrinsic_vor_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vor.vv v8, v8, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv1i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv1i64.i64( - , - , - i64, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 
8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vor.vv v8, v9, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv2i64.i64( - , - , - i64, - i32); - -define @intrinsic_vor_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vor.vv v8, v8, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv2i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv2i64.i64( - , - , - i64, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vor.vv v8, v10, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv4i64.i64( - , - , - i64, - i32); - -define @intrinsic_vor_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vor.vv v8, v8, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv4i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv4i64.i64( - , - , - i64, - , - i32, - i32); - -define @intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vor.vv v8, v12, v16, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vor.nxv8i64.i64( - , - , - i64, - i32); - -define @intrinsic_vor_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vor.vv v8, v8, v16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv8i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vor.mask.nxv8i64.i64( - , - , - i64, - , - i32, - 
i32); - -define @intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vor.vv v8, v16, v24, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv8i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv1i8.i8( - undef, - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vor.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv1i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv2i8.i8( - undef, - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vor.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv2i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv4i8_nxv4i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv4i8.i8( - undef, - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vor.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv4i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv8i8.i8( - undef, - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vor.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv8i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) 
nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv16i8.i8( - undef, - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vor.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv16i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv32i8.i8( - undef, - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vor.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv32i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv64i8.i8( - undef, - %0, - i8 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vor.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv64i8.i8( - %0, - %1, - i8 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv1i16.i16( - undef, - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vor.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv1i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv2i16.i16( - undef, - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vor.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vor.mask.nxv2i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv4i16.i16( - undef, - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vor.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv4i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv8i16.i16( - undef, - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vor.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv8i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv16i16.i16( - undef, - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vor.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv16i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv32i16_nxv32i16_i16( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv32i16.i16( - undef, - %0, - i16 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vor.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv32i16.i16( - %0, - %1, - i16 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv1i32.i32( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: 
intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vor.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv1i32.i32( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv2i32.i32( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vor.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv2i32.i32( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv4i32.i32( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vor.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv4i32.i32( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv8i32.i32( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vor.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv8i32.i32( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv16i32.i32( - undef, - %0, - i32 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vor.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv16i32.i32( - %0, - %1, - i32 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv1i64_nxv1i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vor.nxv1i64.i64( - undef, - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vor.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv1i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv2i64_nxv2i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv2i64.i64( - undef, - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vor.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv2i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv4i64_nxv4i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv4i64.i64( - undef, - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vor.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv4i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3, i32 1) - - ret %a -} - -define @intrinsic_vor_vi_nxv8i64_nxv8i64_i64( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vor_vi_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.nxv8i64.i64( - undef, - %0, - i64 9, - i32 %1) - - ret %a -} - -define @intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vor.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vor.mask.nxv8i64.i64( - %0, - %1, - i64 9, - %2, - i32 %3, i32 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vor.ll similarity index 83% rename from llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll rename to llvm/test/CodeGen/RISCV/rvv/vor.ll index 0c9353c..6229e09 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vor.nxv1i8.nxv1i8( , , , - i64); + iXLen); 
-define @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -18,7 +20,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -28,10 +30,10 @@ declare @llvm.riscv.vor.mask.nxv1i8.nxv1i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -43,7 +45,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -52,9 +54,9 @@ declare @llvm.riscv.vor.nxv2i8.nxv2i8( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -65,7 +67,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -75,10 +77,10 @@ declare @llvm.riscv.vor.mask.nxv2i8.nxv2i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -90,7 +92,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -99,9 +101,9 @@ declare @llvm.riscv.vor.nxv4i8.nxv4i8( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -112,7 +114,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -122,10 +124,10 @@ declare @llvm.riscv.vor.mask.nxv4i8.nxv4i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -137,7 +139,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -146,9 +148,9 @@ declare @llvm.riscv.vor.nxv8i8.nxv8i8( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -159,7 +161,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -169,10 +171,10 @@ declare @llvm.riscv.vor.mask.nxv8i8.nxv8i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a0, e8, m1, ta, mu @@ -184,7 +186,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -193,9 +195,9 @@ declare @llvm.riscv.vor.nxv16i8.nxv16i8( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -206,7 +208,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -216,10 +218,10 @@ declare @llvm.riscv.vor.mask.nxv16i8.nxv16i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -231,7 +233,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -240,9 +242,9 @@ declare @llvm.riscv.vor.nxv32i8.nxv32i8( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -253,7 +255,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -263,10 +265,10 @@ declare @llvm.riscv.vor.mask.nxv32i8.nxv32i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -278,7 +280,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -287,9 +289,9 @@ declare @llvm.riscv.vor.nxv64i8.nxv64i8( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -300,7 +302,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vor.mask.nxv64i8.nxv64i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) @@ -326,7 +328,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -335,9 +337,9 @@ declare @llvm.riscv.vor.nxv1i16.nxv1i16( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -348,7 +350,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -358,10 +360,10 @@ declare @llvm.riscv.vor.mask.nxv1i16.nxv1i16( , , , - i64, - i64); + iXLen, + iXLen); -define 
@intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -373,7 +375,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -382,9 +384,9 @@ declare @llvm.riscv.vor.nxv2i16.nxv2i16( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -395,7 +397,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -405,10 +407,10 @@ declare @llvm.riscv.vor.mask.nxv2i16.nxv2i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -420,7 +422,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -429,9 +431,9 @@ declare @llvm.riscv.vor.nxv4i16.nxv4i16( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -442,7 +444,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -452,10 +454,10 @@ declare @llvm.riscv.vor.mask.nxv4i16.nxv4i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -467,7 +469,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -476,9 +478,9 @@ declare @llvm.riscv.vor.nxv8i16.nxv8i16( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -489,7 +491,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -499,10 +501,10 @@ declare @llvm.riscv.vor.mask.nxv8i16.nxv8i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -514,7 +516,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -523,9 +525,9 @@ declare @llvm.riscv.vor.nxv16i16.nxv16i16( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -536,7 +538,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -546,10 +548,10 @@ declare @llvm.riscv.vor.mask.nxv16i16.nxv16i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -561,7 +563,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -570,9 +572,9 @@ declare @llvm.riscv.vor.nxv32i16.nxv32i16( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -583,7 +585,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -593,10 +595,10 @@ declare @llvm.riscv.vor.mask.nxv32i16.nxv32i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -609,7 +611,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -618,9 +620,9 @@ declare @llvm.riscv.vor.nxv1i32.nxv1i32( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -631,7 +633,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -641,10 +643,10 @@ declare @llvm.riscv.vor.mask.nxv1i32.nxv1i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -656,7 +658,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -665,9 +667,9 @@ declare @llvm.riscv.vor.nxv2i32.nxv2i32( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -678,7 +680,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -688,10 +690,10 @@ declare @llvm.riscv.vor.mask.nxv2i32.nxv2i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -703,7 
+705,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -712,9 +714,9 @@ declare @llvm.riscv.vor.nxv4i32.nxv4i32( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -725,7 +727,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -735,10 +737,10 @@ declare @llvm.riscv.vor.mask.nxv4i32.nxv4i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -750,7 +752,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -759,9 +761,9 @@ declare @llvm.riscv.vor.nxv8i32.nxv8i32( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -772,7 +774,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -782,10 +784,10 @@ declare @llvm.riscv.vor.mask.nxv8i32.nxv8i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -797,7 +799,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vor.nxv16i32.nxv16i32( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -819,7 +821,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -829,10 +831,10 @@ declare @llvm.riscv.vor.mask.nxv16i32.nxv16i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -845,7 +847,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -854,9 +856,9 @@ declare @llvm.riscv.vor.nxv1i64.nxv1i64( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -867,7 +869,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -877,10 +879,10 @@ declare @llvm.riscv.vor.mask.nxv1i64.nxv1i64( , , , - i64, - i64); + iXLen, + iXLen); -define 
@intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -892,7 +894,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -901,9 +903,9 @@ declare @llvm.riscv.vor.nxv2i64.nxv2i64( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -914,7 +916,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -924,10 +926,10 @@ declare @llvm.riscv.vor.mask.nxv2i64.nxv2i64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -939,7 +941,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vor.nxv4i64.nxv4i64( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -961,7 +963,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -971,10 +973,10 @@ declare @llvm.riscv.vor.mask.nxv4i64.nxv4i64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -986,7 +988,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -995,9 +997,9 @@ declare @llvm.riscv.vor.nxv8i64.nxv8i64( , , , - i64); + iXLen); -define @intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -1008,7 +1010,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -1018,10 +1020,10 @@ declare @llvm.riscv.vor.mask.nxv8i64.nxv8i64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -1034,7 +1036,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1043,9 +1045,9 @@ declare @llvm.riscv.vor.nxv1i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vor_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vor_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vor_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -1056,7 +1058,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1066,10 +1068,10 @@ declare @llvm.riscv.vor.mask.nxv1i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu @@ -1081,7 +1083,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1090,9 +1092,9 @@ declare @llvm.riscv.vor.nxv2i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vor_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vor_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -1103,7 +1105,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1113,10 +1115,10 @@ declare @llvm.riscv.vor.mask.nxv2i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu @@ -1128,7 +1130,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1137,9 +1139,9 @@ declare @llvm.riscv.vor.nxv4i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vor_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vor_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1150,7 +1152,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1160,10 +1162,10 @@ declare @llvm.riscv.vor.mask.nxv4i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu @@ -1175,7 +1177,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1184,9 +1186,9 @@ declare @llvm.riscv.vor.nxv8i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vor_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vor_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -1197,7 +1199,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1207,10 +1209,10 @@ declare @llvm.riscv.vor.mask.nxv8i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu @@ -1222,7 +1224,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1231,9 +1233,9 @@ declare 
@llvm.riscv.vor.nxv16i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vor_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vor_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1244,7 +1246,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1254,10 +1256,10 @@ declare @llvm.riscv.vor.mask.nxv16i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu @@ -1269,7 +1271,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1278,9 +1280,9 @@ declare @llvm.riscv.vor.nxv32i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vor_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vor_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -1291,7 +1293,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1301,10 +1303,10 @@ declare @llvm.riscv.vor.mask.nxv32i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu @@ -1316,7 +1318,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1325,9 +1327,9 @@ declare @llvm.riscv.vor.nxv64i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vor_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vor_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma @@ -1338,7 +1340,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1348,10 +1350,10 @@ declare @llvm.riscv.vor.mask.nxv64i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu @@ -1363,7 +1365,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1372,9 +1374,9 @@ declare @llvm.riscv.vor.nxv1i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vor_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vor_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -1385,7 +1387,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1395,10 +1397,10 @@ declare @llvm.riscv.vor.mask.nxv1i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen 
%4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu @@ -1410,7 +1412,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1419,9 +1421,9 @@ declare @llvm.riscv.vor.nxv2i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vor_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vor_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -1432,7 +1434,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1442,10 +1444,10 @@ declare @llvm.riscv.vor.mask.nxv2i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu @@ -1457,7 +1459,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1466,9 +1468,9 @@ declare @llvm.riscv.vor.nxv4i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vor_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vor_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1479,7 +1481,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1489,10 +1491,10 @@ declare @llvm.riscv.vor.mask.nxv4i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -1504,7 +1506,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1513,9 +1515,9 @@ declare @llvm.riscv.vor.nxv8i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vor_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vor_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1526,7 +1528,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1536,10 +1538,10 @@ declare @llvm.riscv.vor.mask.nxv8i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -1551,7 +1553,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1560,9 +1562,9 @@ declare @llvm.riscv.vor.nxv16i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vor_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vor_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -1573,7 +1575,7 @@ 
entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1583,10 +1585,10 @@ declare @llvm.riscv.vor.mask.nxv16i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu @@ -1598,7 +1600,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1607,9 +1609,9 @@ declare @llvm.riscv.vor.nxv32i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vor_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vor_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma @@ -1620,7 +1622,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1630,10 +1632,10 @@ declare @llvm.riscv.vor.mask.nxv32i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu @@ -1645,7 +1647,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1654,9 +1656,9 @@ declare @llvm.riscv.vor.nxv1i32.i32( , , i32, - i64); + iXLen); -define @intrinsic_vor_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vor_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1667,7 +1669,7 @@ entry: undef, %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1677,10 +1679,10 @@ declare @llvm.riscv.vor.mask.nxv1i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu @@ -1692,7 +1694,7 @@ entry: %1, i32 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1701,9 +1703,9 @@ declare @llvm.riscv.vor.nxv2i32.i32( , , i32, - i64); + iXLen); -define @intrinsic_vor_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vor_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1714,7 +1716,7 @@ entry: undef, %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1724,10 +1726,10 @@ declare @llvm.riscv.vor.mask.nxv2i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu @@ -1739,7 +1741,7 @@ entry: %1, i32 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1748,9 +1750,9 @@ declare 
@llvm.riscv.vor.nxv4i32.i32( , , i32, - i64); + iXLen); -define @intrinsic_vor_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vor_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1761,7 +1763,7 @@ entry: undef, %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1771,10 +1773,10 @@ declare @llvm.riscv.vor.mask.nxv4i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1786,7 +1788,7 @@ entry: %1, i32 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1795,9 +1797,9 @@ declare @llvm.riscv.vor.nxv8i32.i32( , , i32, - i64); + iXLen); -define @intrinsic_vor_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vor_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1808,7 +1810,7 @@ entry: undef, %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1818,10 +1820,10 @@ declare @llvm.riscv.vor.mask.nxv8i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1833,7 +1835,7 @@ entry: %1, i32 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1842,9 +1844,9 @@ declare @llvm.riscv.vor.nxv16i32.i32( , , i32, - i64); + iXLen); -define @intrinsic_vor_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vor_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma @@ -1855,7 +1857,7 @@ entry: undef, %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1865,10 +1867,10 @@ declare @llvm.riscv.vor.mask.nxv16i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu @@ -1880,7 +1882,7 @@ entry: %1, i32 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1889,20 +1891,32 @@ declare @llvm.riscv.vor.nxv1i64.i64( , , i64, - i64); - -define @intrinsic_vor_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vor_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vor_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, 
e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vor_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vor.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vor.nxv1i64.i64( undef, %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -1912,22 +1926,34 @@ declare @llvm.riscv.vor.mask.nxv1i64.i64( , i64, , - i64, - i64); - -define @intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vor.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vor.vv v8, v9, v10, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vor.vx v8, v9, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vor.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1936,20 +1962,32 @@ declare @llvm.riscv.vor.nxv2i64.i64( , , i64, - i64); - -define @intrinsic_vor_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vor_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vor_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vor_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vor.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vor.nxv2i64.i64( undef, %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -1959,22 +1997,34 @@ declare @llvm.riscv.vor.mask.nxv2i64.i64( , i64, , - i64, - i64); - -define @intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vor.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vor.vv v8, v10, v12, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: 
intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vor.vx v8, v10, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vor.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1983,20 +2033,32 @@ declare @llvm.riscv.vor.nxv4i64.i64( , , i64, - i64); - -define @intrinsic_vor_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vor_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vor_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vor_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vor.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vor.nxv4i64.i64( undef, %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -2006,22 +2068,34 @@ declare @llvm.riscv.vor.mask.nxv4i64.i64( , i64, , - i64, - i64); - -define @intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vor.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vor.vv v8, v12, v16, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vor.vx v8, v12, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vor.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -2030,20 +2104,32 @@ declare @llvm.riscv.vor.nxv8i64.i64( , , i64, - i64); - -define @intrinsic_vor_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vor_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vor.vx v8, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vor_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vor_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vor_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vor.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call 
@llvm.riscv.vor.nxv8i64.i64( undef, %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -2053,27 +2139,39 @@ declare @llvm.riscv.vor.mask.nxv8i64.i64( , i64, , - i64, - i64); - -define @intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vor.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vor.vv v8, v16, v24, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vor.vx v8, v16, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vor.mask.nxv8i64.i64( %0, %1, i64 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv1i8_nxv1i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -2084,12 +2182,12 @@ entry: undef, %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -2101,12 +2199,12 @@ entry: %1, i8 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv2i8_nxv2i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -2117,12 +2215,12 @@ entry: undef, %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -2134,12 +2232,12 @@ entry: %1, i8 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv4i8_nxv4i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -2150,12 +2248,12 @@ entry: undef, %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -2167,12 +2265,12 @@ entry: %1, i8 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv8i8_nxv8i8_i8( %0, iXLen %1) 
nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -2183,12 +2281,12 @@ entry: undef, %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -2200,12 +2298,12 @@ entry: %1, i8 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv16i8_nxv16i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -2216,12 +2314,12 @@ entry: undef, %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -2233,12 +2331,12 @@ entry: %1, i8 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv32i8_nxv32i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -2249,12 +2347,12 @@ entry: undef, %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -2266,12 +2364,12 @@ entry: %1, i8 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv64i8_nxv64i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -2282,12 +2380,12 @@ entry: undef, %0, i8 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu @@ -2299,12 +2397,12 @@ entry: %1, i8 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv1i16_nxv1i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -2315,12 +2413,12 @@ entry: undef, %0, i16 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -2332,12 +2430,12 @@ entry: %1, i16 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define 
@intrinsic_vor_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv2i16_nxv2i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -2348,12 +2446,12 @@ entry: undef, %0, i16 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -2365,12 +2463,12 @@ entry: %1, i16 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv4i16_nxv4i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -2381,12 +2479,12 @@ entry: undef, %0, i16 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -2398,12 +2496,12 @@ entry: %1, i16 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv8i16_nxv8i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -2414,12 +2512,12 @@ entry: undef, %0, i16 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -2431,12 +2529,12 @@ entry: %1, i16 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv16i16_nxv16i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -2447,12 +2545,12 @@ entry: undef, %0, i16 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -2464,12 +2562,12 @@ entry: %1, i16 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv32i16_nxv32i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -2480,12 +2578,12 @@ entry: undef, %0, i16 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -2497,12 +2595,12 @@ entry: %1, i16 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv1i32_nxv1i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -2513,12 +2611,12 @@ entry: undef, %0, i32 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -2530,12 +2628,12 @@ entry: %1, i32 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv2i32_nxv2i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -2546,12 +2644,12 @@ entry: undef, %0, i32 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -2563,12 +2661,12 @@ entry: %1, i32 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv4i32_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -2579,12 +2677,12 @@ entry: undef, %0, i32 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -2596,12 +2694,12 @@ entry: %1, i32 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv8i32_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -2612,12 +2710,12 @@ entry: undef, %0, i32 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -2629,12 +2727,12 @@ entry: %1, i32 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv16i32_nxv16i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -2645,12 +2743,12 @@ entry: undef, %0, i32 9, - i64 %1) + iXLen %1) ret %a } -define 
@intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -2662,12 +2760,12 @@ entry: %1, i32 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv1i64_nxv1i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -2678,12 +2776,12 @@ entry: undef, %0, i64 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -2695,12 +2793,12 @@ entry: %1, i64 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv2i64_nxv2i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -2711,12 +2809,12 @@ entry: undef, %0, i64 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -2728,12 +2826,12 @@ entry: %1, i64 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv4i64_nxv4i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -2744,12 +2842,12 @@ entry: undef, %0, i64 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -2761,12 +2859,12 @@ entry: %1, i64 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vor_vi_nxv8i64_nxv8i64_i64( %0, i64 %1) nounwind { +define @intrinsic_vor_vi_nxv8i64_nxv8i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -2777,12 +2875,12 @@ entry: undef, %0, i64 9, - i64 %1) + iXLen %1) ret %a } -define @intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { +define @intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -2794,7 +2892,7 @@ entry: %1, i64 9, %2, - i64 %3, i64 1) + iXLen %3, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll deleted file mode 100644 index 64f6bb4..0000000 --- 
a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll +++ /dev/null @@ -1,2122 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vrem.nxv1i8.nxv1i8( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv1i8.nxv1i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv1i8.nxv1i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv2i8.nxv2i8( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv2i8.nxv2i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv2i8.nxv2i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv4i8.nxv4i8( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv4i8.nxv4i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv4i8.nxv4i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv8i8.nxv8i8( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv8i8.nxv8i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv8i8.nxv8i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { -; 
CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv16i8.nxv16i8( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv16i8.nxv16i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv16i8.nxv16i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv32i8.nxv32i8( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv32i8.nxv32i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv32i8.nxv32i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv64i8.nxv64i8( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv64i8.nxv64i8( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv64i8.nxv64i8( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv1i16.nxv1i16( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv1i16.nxv1i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - 
-declare @llvm.riscv.vrem.mask.nxv1i16.nxv1i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv2i16.nxv2i16( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv2i16.nxv2i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv2i16.nxv2i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv4i16.nxv4i16( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv4i16.nxv4i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv4i16.nxv4i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv8i16.nxv8i16( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv8i16.nxv8i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv8i16.nxv8i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv16i16.nxv16i16( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma 
-; CHECK-NEXT: vrem.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv16i16.nxv16i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv16i16.nxv16i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv32i16.nxv32i16( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv32i16.nxv32i16( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv32i16.nxv32i16( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv1i32.nxv1i32( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv1i32.nxv1i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv1i32.nxv1i32( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv2i32.nxv2i32( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv2i32.nxv2i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv2i32.nxv2i32( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv4i32.nxv4i32( - , - , - , - i32); - 
-define @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv4i32.nxv4i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv4i32.nxv4i32( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv8i32.nxv8i32( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv8i32.nxv8i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv8i32.nxv8i32( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv16i32.nxv16i32( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv16i32.nxv16i32( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv16i32.nxv16i32( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv1i64.nxv1i64( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv1i64.nxv1i64( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv1i64.nxv1i64( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vrem.vv v8, v9, v10, 
v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv2i64.nxv2i64( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv2i64.nxv2i64( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv2i64.nxv2i64( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv4i64.nxv4i64( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv4i64.nxv4i64( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv4i64.nxv4i64( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv8i64.nxv8i64( - , - , - , - i32); - -define @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vrem.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv8i64.nxv8i64( - undef, - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv8i64.nxv8i64( - , - , - , - , - i32, - i32); - -define @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64( - %0, - %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv1i8.i8( - , - , - i8, - i32); - -define @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vrem.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv1i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv1i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv2i8.i8( - , - , - i8, - i32); - -define @intrinsic_vrem_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vrem.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv2i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv2i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv4i8.i8( - , - , - i8, - i32); - -define @intrinsic_vrem_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vrem.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv4i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv4i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv8i8.i8( - , - , - i8, - i32); - -define @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vrem.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv8i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv8i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv16i8.i8( - , - , - i8, - i32); - -define @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vrem.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv16i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv16i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vrem.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv32i8.i8( - , - , - i8, - i32); - -define @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vrem.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv32i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv32i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vrem.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv64i8.i8( - , - , - i8, - i32); - -define @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vrem.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv64i8.i8( - undef, - %0, - i8 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv64i8.i8( - , - , - i8, - , - i32, - i32); - -define @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vrem.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv64i8.i8( - %0, - %1, - i8 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv1i16.i16( - , - , - i16, - i32); - -define @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vrem.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv1i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv1i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv2i16.i16( - , - , - i16, - i32); - -define @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vrem.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv2i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv2i16.i16( - , - , - i16, - , - i32, - i32); - -define 
@intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv4i16.i16( - , - , - i16, - i32); - -define @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vrem.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv4i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv4i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv8i16.i16( - , - , - i16, - i32); - -define @intrinsic_vrem_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vrem.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv8i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv8i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vrem.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv16i16.i16( - , - , - i16, - i32); - -define @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vrem.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv16i16.i16( - undef, - %0, - i16 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv16i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vrem.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv32i16.i16( - , - , - i16, - i32); - -define @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vrem.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv32i16.i16( - undef, - %0, - i16 
%1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv32i16.i16( - , - , - i16, - , - i32, - i32); - -define @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vrem.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv32i16.i16( - %0, - %1, - i16 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv1i32.i32( - , - , - i32, - i32); - -define @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vrem.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv1i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv1i32.i32( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv2i32.i32( - , - , - i32, - i32); - -define @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vrem.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv2i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv2i32.i32( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv4i32.i32( - , - , - i32, - i32); - -define @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vrem.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv4i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv4i32.i32( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vrem.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv8i32.i32( - , - , - i32, - i32); - -define @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: 
vrem.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv8i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv8i32.i32( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vrem.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv16i32.i32( - , - , - i32, - i32); - -define @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vrem.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv16i32.i32( - undef, - %0, - i32 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv16i32.i32( - , - , - i32, - , - i32, - i32); - -define @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vrem.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv16i32.i32( - %0, - %1, - i32 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv1i64.i64( - , - , - i64, - i32); - -define @intrinsic_vrem_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vrem.vv v8, v8, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv1i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv1i64.i64( - , - , - i64, - , - i32, - i32); - -define @intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv2i64.i64( - , - , - i64, - i32); - -define @intrinsic_vrem_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vrem.vv v8, v8, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv2i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv2i64.i64( - , - , - i64, - , - i32, - i32); - -define 
@intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv4i64.i64( - , - , - i64, - i32); - -define @intrinsic_vrem_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vrem.vv v8, v8, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv4i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv4i64.i64( - , - , - i64, - , - i32, - i32); - -define @intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vrem.nxv8i64.i64( - , - , - i64, - i32); - -define @intrinsic_vrem_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vrem.vv v8, v8, v16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.nxv8i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vrem.mask.nxv8i64.i64( - , - , - i64, - , - i32, - i32); - -define @intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vrem.mask.nxv8i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4, i32 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem.ll similarity index 80% rename from llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll rename to llvm/test/CodeGen/RISCV/rvv/vrem.ll index afca262..30b40f4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have 
been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vrem.nxv1i8.nxv1i8( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -18,7 +20,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -28,10 +30,10 @@ declare @llvm.riscv.vrem.mask.nxv1i8.nxv1i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -43,7 +45,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -52,9 +54,9 @@ declare @llvm.riscv.vrem.nxv2i8.nxv2i8( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -65,7 +67,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -75,10 +77,10 @@ declare @llvm.riscv.vrem.mask.nxv2i8.nxv2i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -90,7 +92,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -99,9 +101,9 @@ declare @llvm.riscv.vrem.nxv4i8.nxv4i8( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -112,7 +114,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -122,10 +124,10 @@ declare @llvm.riscv.vrem.mask.nxv4i8.nxv4i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -137,7 +139,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -146,9 +148,9 @@ declare @llvm.riscv.vrem.nxv8i8.nxv8i8( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry 
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -159,7 +161,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -169,10 +171,10 @@ declare @llvm.riscv.vrem.mask.nxv8i8.nxv8i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -184,7 +186,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -193,9 +195,9 @@ declare @llvm.riscv.vrem.nxv16i8.nxv16i8( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -206,7 +208,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -216,10 +218,10 @@ declare @llvm.riscv.vrem.mask.nxv16i8.nxv16i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -231,7 +233,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -240,9 +242,9 @@ declare @llvm.riscv.vrem.nxv32i8.nxv32i8( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -253,7 +255,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -263,10 +265,10 @@ declare @llvm.riscv.vrem.mask.nxv32i8.nxv32i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -278,7 +280,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -287,9 +289,9 @@ declare @llvm.riscv.vrem.nxv64i8.nxv64i8( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -300,7 +302,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vrem.mask.nxv64i8.nxv64i8( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) @@ -326,7 +328,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -335,9 +337,9 @@ declare 
@llvm.riscv.vrem.nxv1i16.nxv1i16( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -348,7 +350,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -358,10 +360,10 @@ declare @llvm.riscv.vrem.mask.nxv1i16.nxv1i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -373,7 +375,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -382,9 +384,9 @@ declare @llvm.riscv.vrem.nxv2i16.nxv2i16( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -395,7 +397,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -405,10 +407,10 @@ declare @llvm.riscv.vrem.mask.nxv2i16.nxv2i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -420,7 +422,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -429,9 +431,9 @@ declare @llvm.riscv.vrem.nxv4i16.nxv4i16( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -442,7 +444,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -452,10 +454,10 @@ declare @llvm.riscv.vrem.mask.nxv4i16.nxv4i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -467,7 +469,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -476,9 +478,9 @@ declare @llvm.riscv.vrem.nxv8i16.nxv8i16( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -489,7 +491,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -499,10 +501,10 @@ declare @llvm.riscv.vrem.mask.nxv8i16.nxv8i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +define 
@intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -514,7 +516,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -523,9 +525,9 @@ declare @llvm.riscv.vrem.nxv16i16.nxv16i16( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -536,7 +538,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -546,10 +548,10 @@ declare @llvm.riscv.vrem.mask.nxv16i16.nxv16i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -561,7 +563,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -570,9 +572,9 @@ declare @llvm.riscv.vrem.nxv32i16.nxv32i16( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -583,7 +585,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -593,10 +595,10 @@ declare @llvm.riscv.vrem.mask.nxv32i16.nxv32i16( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -609,7 +611,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -618,9 +620,9 @@ declare @llvm.riscv.vrem.nxv1i32.nxv1i32( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -631,7 +633,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -641,10 +643,10 @@ declare @llvm.riscv.vrem.mask.nxv1i32.nxv1i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -656,7 +658,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -665,9 +667,9 @@ declare @llvm.riscv.vrem.nxv2i32.nxv2i32( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -678,7 +680,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -688,10 +690,10 @@ declare @llvm.riscv.vrem.mask.nxv2i32.nxv2i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -703,7 +705,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -712,9 +714,9 @@ declare @llvm.riscv.vrem.nxv4i32.nxv4i32( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -725,7 +727,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -735,10 +737,10 @@ declare @llvm.riscv.vrem.mask.nxv4i32.nxv4i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -750,7 +752,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -759,9 +761,9 @@ declare @llvm.riscv.vrem.nxv8i32.nxv8i32( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -772,7 +774,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -782,10 +784,10 @@ declare @llvm.riscv.vrem.mask.nxv8i32.nxv8i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -797,7 +799,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vrem.nxv16i32.nxv16i32( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -819,7 +821,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -829,10 +831,10 @@ declare @llvm.riscv.vrem.mask.nxv16i32.nxv16i32( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -845,7 
+847,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -854,9 +856,9 @@ declare @llvm.riscv.vrem.nxv1i64.nxv1i64( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -867,7 +869,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -877,10 +879,10 @@ declare @llvm.riscv.vrem.mask.nxv1i64.nxv1i64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -892,7 +894,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -901,9 +903,9 @@ declare @llvm.riscv.vrem.nxv2i64.nxv2i64( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -914,7 +916,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -924,10 +926,10 @@ declare @llvm.riscv.vrem.mask.nxv2i64.nxv2i64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -939,7 +941,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vrem.nxv4i64.nxv4i64( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -961,7 +963,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -971,10 +973,10 @@ declare @llvm.riscv.vrem.mask.nxv4i64.nxv4i64( , , , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -986,7 +988,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -995,9 +997,9 @@ declare @llvm.riscv.vrem.nxv8i64.nxv8i64( , , , - i64); + iXLen); -define @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +define @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -1008,7 +1010,7 @@ entry: undef, %0, %1, - i64 %2) + iXLen %2) ret %a } @@ -1018,10 +1020,10 @@ declare @llvm.riscv.vrem.mask.nxv8i64.nxv8i64( , , , - i64, - i64); + iXLen, + iXLen); 
-define @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -1034,7 +1036,7 @@ entry: %1, %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1043,9 +1045,9 @@ declare @llvm.riscv.vrem.nxv1i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -1056,7 +1058,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1066,10 +1068,10 @@ declare @llvm.riscv.vrem.mask.nxv1i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu @@ -1081,7 +1083,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1090,9 +1092,9 @@ declare @llvm.riscv.vrem.nxv2i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vrem_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vrem_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -1103,7 +1105,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1113,10 +1115,10 @@ declare @llvm.riscv.vrem.mask.nxv2i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu @@ -1128,7 +1130,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1137,9 +1139,9 @@ declare @llvm.riscv.vrem.nxv4i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vrem_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vrem_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1150,7 +1152,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1160,10 +1162,10 @@ declare @llvm.riscv.vrem.mask.nxv4i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu @@ -1175,7 +1177,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1184,9 +1186,9 @@ declare @llvm.riscv.vrem.nxv8i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -1197,7 +1199,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1207,10 +1209,10 @@ declare @llvm.riscv.vrem.mask.nxv8i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu @@ -1222,7 +1224,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1231,9 +1233,9 @@ declare @llvm.riscv.vrem.nxv16i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1244,7 +1246,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1254,10 +1256,10 @@ declare @llvm.riscv.vrem.mask.nxv16i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu @@ -1269,7 +1271,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1278,9 +1280,9 @@ declare @llvm.riscv.vrem.nxv32i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -1291,7 +1293,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1301,10 +1303,10 @@ declare @llvm.riscv.vrem.mask.nxv32i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu @@ -1316,7 +1318,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1325,9 +1327,9 @@ declare @llvm.riscv.vrem.nxv64i8.i8( , , i8, - i64); + iXLen); -define @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +define @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma @@ -1338,7 +1340,7 @@ entry: undef, %0, i8 %1, - i64 %2) + iXLen %2) ret %a } @@ -1348,10 +1350,10 @@ declare @llvm.riscv.vrem.mask.nxv64i8.i8( , i8, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu @@ -1363,7 +1365,7 @@ entry: %1, i8 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1372,9 +1374,9 @@ declare 
@llvm.riscv.vrem.nxv1i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -1385,7 +1387,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1395,10 +1397,10 @@ declare @llvm.riscv.vrem.mask.nxv1i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu @@ -1410,7 +1412,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1419,9 +1421,9 @@ declare @llvm.riscv.vrem.nxv2i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -1432,7 +1434,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1442,10 +1444,10 @@ declare @llvm.riscv.vrem.mask.nxv2i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu @@ -1457,7 +1459,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1466,9 +1468,9 @@ declare @llvm.riscv.vrem.nxv4i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1479,7 +1481,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1489,10 +1491,10 @@ declare @llvm.riscv.vrem.mask.nxv4i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -1504,7 +1506,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1513,9 +1515,9 @@ declare @llvm.riscv.vrem.nxv8i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vrem_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vrem_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1526,7 +1528,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1536,10 +1538,10 @@ declare @llvm.riscv.vrem.mask.nxv8i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) 
nounwind { +define @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -1551,7 +1553,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1560,9 +1562,9 @@ declare @llvm.riscv.vrem.nxv16i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -1573,7 +1575,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1583,10 +1585,10 @@ declare @llvm.riscv.vrem.mask.nxv16i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu @@ -1598,7 +1600,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1607,9 +1609,9 @@ declare @llvm.riscv.vrem.nxv32i16.i16( , , i16, - i64); + iXLen); -define @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +define @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma @@ -1620,7 +1622,7 @@ entry: undef, %0, i16 %1, - i64 %2) + iXLen %2) ret %a } @@ -1630,10 +1632,10 @@ declare @llvm.riscv.vrem.mask.nxv32i16.i16( , i16, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu @@ -1645,7 +1647,7 @@ entry: %1, i16 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1654,9 +1656,9 @@ declare @llvm.riscv.vrem.nxv1i32.i32( , , i32, - i64); + iXLen); -define @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1667,7 +1669,7 @@ entry: undef, %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1677,10 +1679,10 @@ declare @llvm.riscv.vrem.mask.nxv1i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu @@ -1692,7 +1694,7 @@ entry: %1, i32 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1701,9 +1703,9 @@ declare @llvm.riscv.vrem.nxv2i32.i32( , , i32, - i64); + iXLen); -define @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vrem_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1714,7 +1716,7 @@ entry: undef, %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1724,10 +1726,10 @@ declare @llvm.riscv.vrem.mask.nxv2i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu @@ -1739,7 +1741,7 @@ entry: %1, i32 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1748,9 +1750,9 @@ declare @llvm.riscv.vrem.nxv4i32.i32( , , i32, - i64); + iXLen); -define @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1761,7 +1763,7 @@ entry: undef, %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1771,10 +1773,10 @@ declare @llvm.riscv.vrem.mask.nxv4i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1786,7 +1788,7 @@ entry: %1, i32 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1795,9 +1797,9 @@ declare @llvm.riscv.vrem.nxv8i32.i32( , , i32, - i64); + iXLen); -define @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1808,7 +1810,7 @@ entry: undef, %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1818,10 +1820,10 @@ declare @llvm.riscv.vrem.mask.nxv8i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1833,7 +1835,7 @@ entry: %1, i32 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1842,9 +1844,9 @@ declare @llvm.riscv.vrem.nxv16i32.i32( , , i32, - i64); + iXLen); -define @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +define @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma @@ -1855,7 +1857,7 @@ entry: undef, %0, i32 %1, - i64 %2) + iXLen %2) ret %a } @@ -1865,10 +1867,10 @@ declare @llvm.riscv.vrem.mask.nxv16i32.i32( , i32, , - i64, - i64); + iXLen, + iXLen); -define @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +define @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, 
e32, m8, ta, mu
@@ -1880,7 +1882,7 @@ entry:
     <vscale x 16 x i32> %1,
     i32 %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1889,20 +1891,32 @@ declare <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vrem_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vrem_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vrem_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vrem.vv v8, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vrem_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vrem.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1912,22 +1926,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vrem.vv v8, v9, v10, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vrem.vx v8, v9, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1936,20 +1962,32 @@ declare <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vrem_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vrem_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vrem_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vrem.vv v8, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vrem_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vrem.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1959,22 +1997,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define @intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vrem.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vrem.vv v8, v10, v12, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vrem.vx v8, v10, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vrem.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -1983,20 +2033,32 @@ declare @llvm.riscv.vrem.nxv4i64.i64( , , i64, - i64); - -define @intrinsic_vrem_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vrem_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vrem.vx v8, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vrem_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vrem_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vrem.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrem_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vrem.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vrem.nxv4i64.i64( undef, %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -2006,22 +2068,34 @@ declare @llvm.riscv.vrem.mask.nxv4i64.i64( , i64, , - i64, - i64); - -define @intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vrem.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vrem.vv v8, v12, v16, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vrem.vx v8, v12, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vrem.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } @@ -2030,20 +2104,32 @@ declare @llvm.riscv.vrem.nxv8i64.i64( , , i64, - i64); - -define @intrinsic_vrem_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vrem_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vrem.vx v8, v8, a0 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vrem_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vrem_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vrem.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrem_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vrem.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vrem.nxv8i64.i64( undef, %0, i64 %1, - i64 %2) + iXLen %2) ret %a } @@ -2053,22 +2139,34 @@ declare @llvm.riscv.vrem.mask.nxv8i64.i64( , i64, , - i64, - i64); - -define @intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vrem.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vrem.vv v8, v16, v24, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vrem.vx v8, v16, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vrem.mask.nxv8i64.i64( %0, %1, i64 %2, %3, - i64 %4, i64 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll deleted file mode 100644 index eeb71ec..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll +++ /dev/null @@ -1,2074 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vremu.nxv1i8.nxv1i8( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv1i8.nxv1i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv1i8.nxv1i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv2i8.nxv2i8( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { -; 
CHECK-LABEL: intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv2i8.nxv2i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv2i8.nxv2i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv4i8.nxv4i8( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv4i8.nxv4i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv4i8.nxv4i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv8i8.nxv8i8( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv8i8.nxv8i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv8i8.nxv8i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv16i8.nxv16i8( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv16i8.nxv16i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv16i8.nxv16i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare 
@llvm.riscv.vremu.nxv32i8.nxv32i8( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv32i8.nxv32i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv32i8.nxv32i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv64i8.nxv64i8( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv64i8.nxv64i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv64i8.nxv64i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv1i16.nxv1i16( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv1i16.nxv1i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv1i16.nxv1i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv2i16.nxv2i16( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv2i16.nxv2i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv2i16.nxv2i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv4i16.nxv4i16( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv4i16.nxv4i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv4i16.nxv4i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv8i16.nxv8i16( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv8i16.nxv8i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv8i16.nxv8i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv16i16.nxv16i16( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv16i16.nxv16i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv16i16.nxv16i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv32i16.nxv32i16( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv32i16.nxv32i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv32i16.nxv32i16( - , - , - , - 
, - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv1i32.nxv1i32( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv1i32.nxv1i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv1i32.nxv1i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv2i32.nxv2i32( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv2i32.nxv2i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv2i32.nxv2i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv4i32.nxv4i32( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv4i32.nxv4i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv4i32.nxv4i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv8i32.nxv8i32( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, 
m4, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv8i32.nxv8i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv8i32.nxv8i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv16i32.nxv16i32( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv16i32.nxv16i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv16i32.nxv16i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv1i64.nxv1i64( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv1i64.nxv1i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv1i64.nxv1i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv2i64.nxv2i64( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv2i64.nxv2i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv2i64.nxv2i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare 
@llvm.riscv.vremu.nxv4i64.nxv4i64( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv4i64.nxv4i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv4i64.nxv4i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv8i64.nxv8i64( - , - , - , - i64); - -define @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vremu.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv8i64.nxv8i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv8i64.nxv8i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv1i8.i8( - , - , - i8, - i64); - -define @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv1i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv1i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv2i8.i8( - , - , - i8, - i64); - -define @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv2i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv2i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vremu.vx v8, v9, a0, 
v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv4i8.i8( - , - , - i8, - i64); - -define @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv4i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv4i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv8i8.i8( - , - , - i8, - i64); - -define @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv8i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv8i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv16i8.i8( - , - , - i8, - i64); - -define @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv16i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv16i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vremu.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv32i8.i8( - , - , - i8, - i64); - -define @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv32i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv32i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e8, m4, ta, mu -; CHECK-NEXT: vremu.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv64i8.i8( - , - , - i8, - i64); - -define @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv64i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv64i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vremu.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv64i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv1i16.i16( - , - , - i16, - i64); - -define @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv1i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv1i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv2i16.i16( - , - , - i16, - i64); - -define @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv2i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv2i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv4i16.i16( - , - , - i16, - i64); - -define @intrinsic_vremu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv4i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv4i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind 
{ -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv8i16.i16( - , - , - i16, - i64); - -define @intrinsic_vremu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv8i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv8i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vremu.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv16i16.i16( - , - , - i16, - i64); - -define @intrinsic_vremu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv16i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv16i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vremu.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv32i16.i16( - , - , - i16, - i64); - -define @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv32i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv32i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vremu.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv32i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv1i32.i32( - , - , - i32, - i64); - -define @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv1i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - 
-declare @llvm.riscv.vremu.mask.nxv1i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv2i32.i32( - , - , - i32, - i64); - -define @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv2i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv2i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv4i32.i32( - , - , - i32, - i64); - -define @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv4i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv4i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vremu.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv8i32.i32( - , - , - i32, - i64); - -define @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv8i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv8i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vremu.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv16i32.i32( - , - , - i32, - i64); - -define @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; 
CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv16i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv16i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vremu.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv16i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv1i64.i64( - , - , - i64, - i64); - -define @intrinsic_vremu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv1i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv1i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv2i64.i64( - , - , - i64, - i64); - -define @intrinsic_vremu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv2i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv2i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vremu.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv4i64.i64( - , - , - i64, - i64); - -define @intrinsic_vremu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv4i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv4i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vremu.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vremu.nxv8i64.i64( - , - , - i64, - i64); - -define @intrinsic_vremu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) 
nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vremu.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.nxv8i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vremu.mask.nxv8i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vremu.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vremu.mask.nxv8i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vremu.ll similarity index 80% rename from llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vremu.ll index 048333f..39cd411 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu.ll @@ -1,13 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vremu.nxv1i8.nxv1i8( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -18,7 +20,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -28,10 +30,10 @@ declare @llvm.riscv.vremu.mask.nxv1i8.nxv1i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -43,7 +45,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -52,9 +54,9 @@ declare @llvm.riscv.vremu.nxv2i8.nxv2i8( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -65,7 +67,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -75,10 +77,10 @@ declare @llvm.riscv.vremu.mask.nxv2i8.nxv2i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -90,7 +92,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -99,9 +101,9 @@ declare @llvm.riscv.vremu.nxv4i8.nxv4i8( , , , - i32); + iXLen); 
-define @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -112,7 +114,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -122,10 +124,10 @@ declare @llvm.riscv.vremu.mask.nxv4i8.nxv4i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -137,7 +139,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -146,9 +148,9 @@ declare @llvm.riscv.vremu.nxv8i8.nxv8i8( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -159,7 +161,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -169,10 +171,10 @@ declare @llvm.riscv.vremu.mask.nxv8i8.nxv8i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -184,7 +186,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -193,9 +195,9 @@ declare @llvm.riscv.vremu.nxv16i8.nxv16i8( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -206,7 +208,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -216,10 +218,10 @@ declare @llvm.riscv.vremu.mask.nxv16i8.nxv16i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -231,7 +233,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -240,9 +242,9 @@ declare @llvm.riscv.vremu.nxv32i8.nxv32i8( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -253,7 +255,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -263,10 +265,10 @@ declare @llvm.riscv.vremu.mask.nxv32i8.nxv32i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind 
{ ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -278,7 +280,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -287,9 +289,9 @@ declare @llvm.riscv.vremu.nxv64i8.nxv64i8( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -300,7 +302,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vremu.mask.nxv64i8.nxv64i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) @@ -326,7 +328,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -335,9 +337,9 @@ declare @llvm.riscv.vremu.nxv1i16.nxv1i16( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -348,7 +350,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -358,10 +360,10 @@ declare @llvm.riscv.vremu.mask.nxv1i16.nxv1i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -373,7 +375,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -382,9 +384,9 @@ declare @llvm.riscv.vremu.nxv2i16.nxv2i16( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -395,7 +397,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -405,10 +407,10 @@ declare @llvm.riscv.vremu.mask.nxv2i16.nxv2i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -420,7 +422,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -429,9 +431,9 @@ declare @llvm.riscv.vremu.nxv4i16.nxv4i16( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -442,7 +444,7 @@ 
entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -452,10 +454,10 @@ declare @llvm.riscv.vremu.mask.nxv4i16.nxv4i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -467,7 +469,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -476,9 +478,9 @@ declare @llvm.riscv.vremu.nxv8i16.nxv8i16( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -489,7 +491,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -499,10 +501,10 @@ declare @llvm.riscv.vremu.mask.nxv8i16.nxv8i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -514,7 +516,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -523,9 +525,9 @@ declare @llvm.riscv.vremu.nxv16i16.nxv16i16( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -536,7 +538,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -546,10 +548,10 @@ declare @llvm.riscv.vremu.mask.nxv16i16.nxv16i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -561,7 +563,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -570,9 +572,9 @@ declare @llvm.riscv.vremu.nxv32i16.nxv32i16( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -583,7 +585,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -593,10 +595,10 @@ declare @llvm.riscv.vremu.mask.nxv32i16.nxv32i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -609,7 +611,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -618,9 
+620,9 @@ declare @llvm.riscv.vremu.nxv1i32.nxv1i32( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -631,7 +633,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -641,10 +643,10 @@ declare @llvm.riscv.vremu.mask.nxv1i32.nxv1i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -656,7 +658,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -665,9 +667,9 @@ declare @llvm.riscv.vremu.nxv2i32.nxv2i32( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -678,7 +680,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -688,10 +690,10 @@ declare @llvm.riscv.vremu.mask.nxv2i32.nxv2i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -703,7 +705,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -712,9 +714,9 @@ declare @llvm.riscv.vremu.nxv4i32.nxv4i32( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -725,7 +727,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -735,10 +737,10 @@ declare @llvm.riscv.vremu.mask.nxv4i32.nxv4i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -750,7 +752,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -759,9 +761,9 @@ declare @llvm.riscv.vremu.nxv8i32.nxv8i32( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -772,7 +774,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -782,10 +784,10 @@ declare @llvm.riscv.vremu.mask.nxv8i32.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32( 
%0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -797,7 +799,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vremu.nxv16i32.nxv16i32( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -819,7 +821,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -829,10 +831,10 @@ declare @llvm.riscv.vremu.mask.nxv16i32.nxv16i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -845,7 +847,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -854,9 +856,9 @@ declare @llvm.riscv.vremu.nxv1i64.nxv1i64( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -867,7 +869,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -877,10 +879,10 @@ declare @llvm.riscv.vremu.mask.nxv1i64.nxv1i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -892,7 +894,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -901,9 +903,9 @@ declare @llvm.riscv.vremu.nxv2i64.nxv2i64( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -914,7 +916,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -924,10 +926,10 @@ declare @llvm.riscv.vremu.mask.nxv2i64.nxv2i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -939,7 +941,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vremu.nxv4i64.nxv4i64( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -961,7 +963,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -971,10 +973,10 @@ declare @llvm.riscv.vremu.mask.nxv4i64.nxv4i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -986,7 +988,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -995,9 +997,9 @@ declare @llvm.riscv.vremu.nxv8i64.nxv8i64( , , , - i32); + iXLen); -define @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -1008,7 +1010,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -1018,10 +1020,10 @@ declare @llvm.riscv.vremu.mask.nxv8i64.nxv8i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -1034,7 +1036,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1043,9 +1045,9 @@ declare @llvm.riscv.vremu.nxv1i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -1056,7 +1058,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1066,10 +1068,10 @@ declare @llvm.riscv.vremu.mask.nxv1i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu @@ -1081,7 +1083,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1090,9 +1092,9 @@ declare @llvm.riscv.vremu.nxv2i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -1103,7 +1105,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1113,10 +1115,10 @@ declare @llvm.riscv.vremu.mask.nxv2i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu @@ -1128,7 +1130,7 @@ entry: %1, i8 
%2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1137,9 +1139,9 @@ declare @llvm.riscv.vremu.nxv4i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1150,7 +1152,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1160,10 +1162,10 @@ declare @llvm.riscv.vremu.mask.nxv4i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu @@ -1175,7 +1177,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1184,9 +1186,9 @@ declare @llvm.riscv.vremu.nxv8i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -1197,7 +1199,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1207,10 +1209,10 @@ declare @llvm.riscv.vremu.mask.nxv8i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu @@ -1222,7 +1224,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1231,9 +1233,9 @@ declare @llvm.riscv.vremu.nxv16i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1244,7 +1246,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1254,10 +1256,10 @@ declare @llvm.riscv.vremu.mask.nxv16i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu @@ -1269,7 +1271,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1278,9 +1280,9 @@ declare @llvm.riscv.vremu.nxv32i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -1291,7 +1293,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1301,10 +1303,10 @@ declare @llvm.riscv.vremu.mask.nxv32i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 
%2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu @@ -1316,7 +1318,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1325,9 +1327,9 @@ declare @llvm.riscv.vremu.nxv64i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma @@ -1338,7 +1340,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1348,10 +1350,10 @@ declare @llvm.riscv.vremu.mask.nxv64i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu @@ -1363,7 +1365,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1372,9 +1374,9 @@ declare @llvm.riscv.vremu.nxv1i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -1385,7 +1387,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1395,10 +1397,10 @@ declare @llvm.riscv.vremu.mask.nxv1i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu @@ -1410,7 +1412,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1419,9 +1421,9 @@ declare @llvm.riscv.vremu.nxv2i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -1432,7 +1434,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1442,10 +1444,10 @@ declare @llvm.riscv.vremu.mask.nxv2i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu @@ -1457,7 +1459,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1466,9 +1468,9 @@ declare @llvm.riscv.vremu.nxv4i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vremu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vremu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vremu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1479,7 +1481,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1489,10 +1491,10 @@ declare @llvm.riscv.vremu.mask.nxv4i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -1504,7 +1506,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1513,9 +1515,9 @@ declare @llvm.riscv.vremu.nxv8i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vremu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vremu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1526,7 +1528,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1536,10 +1538,10 @@ declare @llvm.riscv.vremu.mask.nxv8i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -1551,7 +1553,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1560,9 +1562,9 @@ declare @llvm.riscv.vremu.nxv16i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vremu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vremu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -1573,7 +1575,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1583,10 +1585,10 @@ declare @llvm.riscv.vremu.mask.nxv16i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu @@ -1598,7 +1600,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1607,9 +1609,9 @@ declare @llvm.riscv.vremu.nxv32i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma @@ -1620,7 +1622,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1630,10 +1632,10 @@ declare @llvm.riscv.vremu.mask.nxv32i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: 
# %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu @@ -1645,7 +1647,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1654,9 +1656,9 @@ declare @llvm.riscv.vremu.nxv1i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1667,7 +1669,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1677,10 +1679,10 @@ declare @llvm.riscv.vremu.mask.nxv1i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu @@ -1692,7 +1694,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1701,9 +1703,9 @@ declare @llvm.riscv.vremu.nxv2i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1714,7 +1716,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1724,10 +1726,10 @@ declare @llvm.riscv.vremu.mask.nxv2i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu @@ -1739,7 +1741,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1748,9 +1750,9 @@ declare @llvm.riscv.vremu.nxv4i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1761,7 +1763,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1771,10 +1773,10 @@ declare @llvm.riscv.vremu.mask.nxv4i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1786,7 +1788,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1795,9 +1797,9 @@ declare @llvm.riscv.vremu.nxv8i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1808,7 +1810,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen 
%2) ret %a } @@ -1818,10 +1820,10 @@ declare @llvm.riscv.vremu.mask.nxv8i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1833,7 +1835,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1842,9 +1844,9 @@ declare @llvm.riscv.vremu.nxv16i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma @@ -1855,7 +1857,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1865,10 +1867,10 @@ declare @llvm.riscv.vremu.mask.nxv16i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu @@ -1880,7 +1882,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1889,26 +1891,32 @@ declare @llvm.riscv.vremu.nxv1i64.i64( , , i64, - i32); - -define @intrinsic_vremu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vremu.vv v8, v8, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vremu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vremu_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vremu.vv v8, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vremu_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vremu.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv1i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1918,28 +1926,34 @@ declare @llvm.riscv.vremu.mask.nxv1i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: 
intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vremu.vv v8, v9, v10, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vremu.vx v8, v9, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vremu.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1948,26 +1962,32 @@ declare @llvm.riscv.vremu.nxv2i64.i64( , , i64, - i32); - -define @intrinsic_vremu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vremu.vv v8, v8, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vremu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vremu_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vremu.vv v8, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vremu_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vremu.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv2i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1977,28 +1997,34 @@ declare @llvm.riscv.vremu.mask.nxv2i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vremu.vv v8, v10, v12, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vremu.vx v8, v10, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vremu.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -2007,26 +2033,32 @@ declare @llvm.riscv.vremu.nxv4i64.i64( , , i64, - i32); - -define @intrinsic_vremu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vremu_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vremu.vv v8, v8, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vremu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vremu_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vremu.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vremu_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vremu.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv4i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -2036,28 +2068,34 @@ declare @llvm.riscv.vremu.mask.nxv4i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vremu.vv v8, v12, v16, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vremu.vx v8, v12, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vremu.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -2066,26 +2104,32 @@ declare @llvm.riscv.vremu.nxv8i64.i64( , , i64, - i32); - -define @intrinsic_vremu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vremu_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vremu.vv v8, v8, v16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vremu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vremu_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vremu.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vremu_vx_nxv8i64_nxv8i64_i64: 
+; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vremu.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv8i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -2095,28 +2139,34 @@ declare @llvm.riscv.vremu.mask.nxv8i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vremu.vv v8, v16, v24, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vremu.vx v8, v16, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vremu.mask.nxv8i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll deleted file mode 100644 index 77ebd75..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll +++ /dev/null @@ -1,1058 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vsbc.nxv1i8.nxv1i8( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv1i8.nxv1i8( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv2i8.nxv2i8( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv2i8.nxv2i8( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv4i8.nxv4i8( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv4i8.nxv4i8( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv8i8.nxv8i8( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv8i8.nxv8i8( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv16i8.nxv16i8( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv16i8.nxv16i8( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv32i8.nxv32i8( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v12, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv32i8.nxv32i8( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv64i8.nxv64i8( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv64i8.nxv64i8( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv1i16.nxv1i16( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv1i16.nxv1i16( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv2i16.nxv2i16( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv2i16.nxv2i16( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv4i16.nxv4i16( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv4i16.nxv4i16( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv8i16.nxv8i16( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv8i16.nxv8i16( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv16i16.nxv16i16( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16( %0, 
%1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v12, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv16i16.nxv16i16( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv32i16.nxv32i16( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv32i16.nxv32i16( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv1i32.nxv1i32( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv1i32.nxv1i32( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv2i32.nxv2i32( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv2i32.nxv2i32( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv4i32.nxv4i32( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv4i32.nxv4i32( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv8i32.nxv8i32( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v12, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv8i32.nxv8i32( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv16i32.nxv16i32( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv16i32.nxv16i32( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv1i64.nxv1i64( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv1i64.nxv1i64( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare 
@llvm.riscv.vsbc.nxv2i64.nxv2i64( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv2i64.nxv2i64( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv4i64.nxv4i64( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v12, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv4i64.nxv4i64( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv8i64.nxv8i64( - , - , - , - , - i64); - -define @intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv8i64.nxv8i64( - undef, - %0, - %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv1i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv1i8.i8( - undef, - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv2i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv2i8.i8( - undef, - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv4i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv4i8.i8( - undef, - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv8i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv8i8.i8( - undef, - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv16i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv16i8.i8( - undef, - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - 
-declare @llvm.riscv.vsbc.nxv32i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv32i8.i8( - undef, - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv64i8.i8( - , - , - i8, - , - i64); - -define @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv64i8.i8( - undef, - %0, - i8 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv1i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv1i16.i16( - undef, - %0, - i16 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv2i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv2i16.i16( - undef, - %0, - i16 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv4i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv4i16.i16( - undef, - %0, - i16 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv8i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv8i16.i16( - undef, - %0, - i16 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv16i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv16i16.i16( - undef, - %0, - i16 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv32i16.i16( - , - , - i16, - , - i64); - -define @intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv32i16.i16( - undef, - 
%0, - i16 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv1i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv1i32.i32( - undef, - %0, - i32 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv2i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv2i32.i32( - undef, - %0, - i32 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv4i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv4i32.i32( - undef, - %0, - i32 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv8i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv8i32.i32( - undef, - %0, - i32 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv16i32.i32( - , - , - i32, - , - i64); - -define @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv16i32.i32( - undef, - %0, - i32 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv1i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv1i64.i64( - undef, - %0, - i64 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv2i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv2i64.i64( - undef, - %0, - i64 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv4i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: 
- %a = call @llvm.riscv.vsbc.nxv4i64.i64( - undef, - %0, - i64 %1, - %2, - i64 %3) - - ret %a -} - -declare @llvm.riscv.vsbc.nxv8i64.i64( - , - , - i64, - , - i64); - -define @intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64( %0, i64 %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vsbc.nxv8i64.i64( - undef, - %0, - i64 %1, - %2, - i64 %3) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsbc.ll similarity index 82% rename from llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vsbc.ll index eefe37d..6ec17c0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsbc.ll @@ -1,14 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 declare @llvm.riscv.vsbc.nxv1i8.nxv1i8( , , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -20,7 +22,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -30,9 +32,9 @@ declare @llvm.riscv.vsbc.nxv2i8.nxv2i8( , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -44,7 +46,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -54,9 +56,9 @@ declare @llvm.riscv.vsbc.nxv4i8.nxv4i8( , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -68,7 +70,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -78,9 +80,9 @@ declare @llvm.riscv.vsbc.nxv8i8.nxv8i8( , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -92,7 +94,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -102,9 +104,9 @@ declare @llvm.riscv.vsbc.nxv16i8.nxv16i8( , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -116,7 +118,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen 
%3) ret %a } @@ -126,9 +128,9 @@ declare @llvm.riscv.vsbc.nxv32i8.nxv32i8( , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -140,7 +142,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -150,9 +152,9 @@ declare @llvm.riscv.vsbc.nxv64i8.nxv64i8( , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -164,7 +166,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -174,9 +176,9 @@ declare @llvm.riscv.vsbc.nxv1i16.nxv1i16( , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -188,7 +190,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -198,9 +200,9 @@ declare @llvm.riscv.vsbc.nxv2i16.nxv2i16( , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -212,7 +214,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -222,9 +224,9 @@ declare @llvm.riscv.vsbc.nxv4i16.nxv4i16( , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -236,7 +238,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -246,9 +248,9 @@ declare @llvm.riscv.vsbc.nxv8i16.nxv8i16( , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -260,7 +262,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -270,9 +272,9 @@ declare @llvm.riscv.vsbc.nxv16i16.nxv16i16( , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -284,7 +286,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -294,9 +296,9 @@ declare @llvm.riscv.vsbc.nxv32i16.nxv32i16( , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -308,7 +310,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -318,9 +320,9 @@ declare @llvm.riscv.vsbc.nxv1i32.nxv1i32( , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -332,7 +334,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -342,9 +344,9 @@ declare @llvm.riscv.vsbc.nxv2i32.nxv2i32( , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -356,7 +358,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -366,9 +368,9 @@ declare @llvm.riscv.vsbc.nxv4i32.nxv4i32( , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -380,7 +382,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -390,9 +392,9 @@ declare @llvm.riscv.vsbc.nxv8i32.nxv8i32( , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -404,7 +406,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -414,9 +416,9 @@ declare @llvm.riscv.vsbc.nxv16i32.nxv16i32( , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -428,7 +430,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -438,9 +440,9 @@ declare @llvm.riscv.vsbc.nxv1i64.nxv1i64( , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -452,7 +454,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -462,9 +464,9 @@ declare @llvm.riscv.vsbc.nxv2i64.nxv2i64( , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -476,7 +478,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -486,9 +488,9 @@ declare @llvm.riscv.vsbc.nxv4i64.nxv4i64( , , , - i32); + iXLen); -define 
@intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -500,7 +502,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -510,9 +512,9 @@ declare @llvm.riscv.vsbc.nxv8i64.nxv8i64( , , , - i32); + iXLen); -define @intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -524,7 +526,7 @@ entry: %0, %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -534,9 +536,9 @@ declare @llvm.riscv.vsbc.nxv1i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -548,7 +550,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -558,9 +560,9 @@ declare @llvm.riscv.vsbc.nxv2i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -572,7 +574,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -582,9 +584,9 @@ declare @llvm.riscv.vsbc.nxv4i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -596,7 +598,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -606,9 +608,9 @@ declare @llvm.riscv.vsbc.nxv8i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -620,7 +622,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -630,9 +632,9 @@ declare @llvm.riscv.vsbc.nxv16i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -644,7 +646,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -654,9 +656,9 @@ declare @llvm.riscv.vsbc.nxv32i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -668,7 +670,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -678,9 +680,9 @@ declare 
@llvm.riscv.vsbc.nxv64i8.i8( , i8, , - i32); + iXLen); -define @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma @@ -692,7 +694,7 @@ entry: %0, i8 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -702,9 +704,9 @@ declare @llvm.riscv.vsbc.nxv1i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -716,7 +718,7 @@ entry: %0, i16 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -726,9 +728,9 @@ declare @llvm.riscv.vsbc.nxv2i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -740,7 +742,7 @@ entry: %0, i16 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -750,9 +752,9 @@ declare @llvm.riscv.vsbc.nxv4i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -764,7 +766,7 @@ entry: %0, i16 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -774,9 +776,9 @@ declare @llvm.riscv.vsbc.nxv8i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -788,7 +790,7 @@ entry: %0, i16 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -798,9 +800,9 @@ declare @llvm.riscv.vsbc.nxv16i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -812,7 +814,7 @@ entry: %0, i16 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -822,9 +824,9 @@ declare @llvm.riscv.vsbc.nxv32i16.i16( , i16, , - i32); + iXLen); -define @intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma @@ -836,7 +838,7 @@ entry: %0, i16 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -846,9 +848,9 @@ declare @llvm.riscv.vsbc.nxv1i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a1, e32, mf2, ta, ma @@ -860,7 +862,7 @@ entry: %0, i32 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -870,9 +872,9 @@ declare @llvm.riscv.vsbc.nxv2i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -884,7 +886,7 @@ entry: %0, i32 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -894,9 +896,9 @@ declare @llvm.riscv.vsbc.nxv4i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -908,7 +910,7 @@ entry: %0, i32 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -918,9 +920,9 @@ declare @llvm.riscv.vsbc.nxv8i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -932,7 +934,7 @@ entry: %0, i32 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -942,9 +944,9 @@ declare @llvm.riscv.vsbc.nxv16i32.i32( , i32, , - i32); + iXLen); -define @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +define @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma @@ -956,7 +958,7 @@ entry: %0, i32 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -966,27 +968,33 @@ declare @llvm.riscv.vsbc.nxv1i64.i64( , i64, , - i32); - -define @intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsbc.vvm v8, v8, v9, v0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vsbc.vxm v8, v8, a0, v0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv1i64.i64( undef, %0, i64 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -996,27 +1004,33 @@ declare @llvm.riscv.vsbc.nxv2i64.i64( , i64, , - i32); - -define @intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) 
-; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsbc.vvm v8, v8, v10, v0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vsbc.vxm v8, v8, a0, v0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv2i64.i64( undef, %0, i64 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1026,27 +1040,33 @@ declare @llvm.riscv.vsbc.nxv4i64.i64( , i64, , - i32); - -define @intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsbc.vvm v8, v8, v12, v0 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vsbc.vvm v8, v8, v12, v0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vsbc.vxm v8, v8, a0, v0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv4i64.i64( undef, %0, i64 %1, %2, - i32 %3) + iXLen %3) ret %a } @@ -1056,27 +1076,33 @@ declare @llvm.riscv.vsbc.nxv8i64.i64( , i64, , - i32); - -define @intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64( %0, i64 %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsbc.vvm v8, v8, v16, v0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vsbc.vxm v8, v8, a0, v0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv8i64.i64( 
undef, %0, i64 %1, %2, - i32 %3) + iXLen %3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll deleted file mode 100644 index 29772e3..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll +++ /dev/null @@ -1,458 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s - -define @vfmerge_vv_nxv1f16( %va, %vb, %cond) { -; CHECK-LABEL: vfmerge_vv_nxv1f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vfmerge_fv_nxv1f16( %va, half %b, %cond) { -; CHECK-LABEL: vfmerge_fv_nxv1f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, half %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vfmerge_vv_nxv2f16( %va, %vb, %cond) { -; CHECK-LABEL: vfmerge_vv_nxv2f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vfmerge_fv_nxv2f16( %va, half %b, %cond) { -; CHECK-LABEL: vfmerge_fv_nxv2f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, half %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vfmerge_vv_nxv4f16( %va, %vb, %cond) { -; CHECK-LABEL: vfmerge_vv_nxv4f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vfmerge_fv_nxv4f16( %va, half %b, %cond) { -; CHECK-LABEL: vfmerge_fv_nxv4f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, half %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vfmerge_vv_nxv8f16( %va, %vb, %cond) { -; CHECK-LABEL: vfmerge_vv_nxv8f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vfmerge_fv_nxv8f16( %va, half %b, %cond) { -; CHECK-LABEL: vfmerge_fv_nxv8f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, half %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vfmerge_zv_nxv8f16( %va, %cond) { -; CHECK-LABEL: vfmerge_zv_nxv8f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, half zeroinitializer, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_truelhs_nxv8f16_0( %va, %vb) { -; CHECK-LABEL: vmerge_truelhs_nxv8f16_0: -; CHECK: # %bb.0: -; CHECK-NEXT: ret - %mhead = 
insertelement poison, i1 1, i32 0 - %mtrue = shufflevector %mhead, poison, zeroinitializer - %vc = select %mtrue, %va, %vb - ret %vc -} - -define @vmerge_falselhs_nxv8f16_0( %va, %vb) { -; CHECK-LABEL: vmerge_falselhs_nxv8f16_0: -; CHECK: # %bb.0: -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret - %vc = select zeroinitializer, %va, %vb - ret %vc -} - -define @vfmerge_vv_nxv16f16( %va, %vb, %cond) { -; CHECK-LABEL: vfmerge_vv_nxv16f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vfmerge_fv_nxv16f16( %va, half %b, %cond) { -; CHECK-LABEL: vfmerge_fv_nxv16f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, half %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vfmerge_vv_nxv32f16( %va, %vb, %cond) { -; CHECK-LABEL: vfmerge_vv_nxv32f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vfmerge_fv_nxv32f16( %va, half %b, %cond) { -; CHECK-LABEL: vfmerge_fv_nxv32f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, half %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vfmerge_vv_nxv1f32( %va, %vb, %cond) { -; CHECK-LABEL: vfmerge_vv_nxv1f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vfmerge_fv_nxv1f32( %va, float %b, %cond) { -; CHECK-LABEL: vfmerge_fv_nxv1f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, float %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vfmerge_vv_nxv2f32( %va, %vb, %cond) { -; CHECK-LABEL: vfmerge_vv_nxv2f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vfmerge_fv_nxv2f32( %va, float %b, %cond) { -; CHECK-LABEL: vfmerge_fv_nxv2f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, float %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vfmerge_vv_nxv4f32( %va, %vb, %cond) { -; CHECK-LABEL: vfmerge_vv_nxv4f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vfmerge_fv_nxv4f32( %va, float %b, %cond) { -; CHECK-LABEL: vfmerge_fv_nxv4f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, float %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vfmerge_vv_nxv8f32( %va, %vb, 
%cond) { -; CHECK-LABEL: vfmerge_vv_nxv8f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vfmerge_fv_nxv8f32( %va, float %b, %cond) { -; CHECK-LABEL: vfmerge_fv_nxv8f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, float %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vfmerge_zv_nxv8f32( %va, %cond) { -; CHECK-LABEL: vfmerge_zv_nxv8f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, float zeroinitializer, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vfmerge_vv_nxv16f32( %va, %vb, %cond) { -; CHECK-LABEL: vfmerge_vv_nxv16f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vfmerge_fv_nxv16f32( %va, float %b, %cond) { -; CHECK-LABEL: vfmerge_fv_nxv16f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, float %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vfmerge_vv_nxv1f64( %va, %vb, %cond) { -; CHECK-LABEL: vfmerge_vv_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vfmerge_fv_nxv1f64( %va, double %b, %cond) { -; CHECK-LABEL: vfmerge_fv_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, double %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vfmerge_vv_nxv2f64( %va, %vb, %cond) { -; CHECK-LABEL: vfmerge_vv_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vfmerge_fv_nxv2f64( %va, double %b, %cond) { -; CHECK-LABEL: vfmerge_fv_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, double %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vfmerge_vv_nxv4f64( %va, %vb, %cond) { -; CHECK-LABEL: vfmerge_vv_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vfmerge_fv_nxv4f64( %va, double %b, %cond) { -; CHECK-LABEL: vfmerge_fv_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, double %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vfmerge_vv_nxv8f64( %va, %vb, %cond) { -; 
CHECK-LABEL: vfmerge_vv_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vfmerge_fv_nxv8f64( %va, double %b, %cond) { -; CHECK-LABEL: vfmerge_fv_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma -; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, double %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vfmerge_zv_nxv8f64( %va, %cond) { -; CHECK-LABEL: vfmerge_zv_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, double zeroinitializer, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vselect_combine_regression( %va, %vb) { -; CHECK-LABEL: vselect_combine_regression: -; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 -; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: add a1, a0, a1 -; CHECK-NEXT: vl8re64.v v8, (a1) -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: vl8re64.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma -; CHECK-NEXT: vmseq.vi v24, v16, 0 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vmseq.vi v0, v16, 0 -; CHECK-NEXT: vmv.v.i v16, 0 -; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 -; CHECK-NEXT: add sp, sp, a0 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret - %cond = icmp eq %va, zeroinitializer - %sel = select %cond, %vb, zeroinitializer - ret %sel -} - -define void @vselect_legalize_regression( %a, %ma, %mb, * %out) { -; CHECK-LABEL: vselect_legalize_regression: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, ma -; CHECK-NEXT: vlm.v v24, (a0) -; CHECK-NEXT: vmand.mm v1, v0, v24 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: srli a2, a0, 3 -; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v1, a2 -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv.v.i v24, 0 -; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0 -; CHECK-NEXT: vmv1r.v v0, v1 -; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0 -; CHECK-NEXT: vs8r.v v8, (a1) -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, a1, a0 -; CHECK-NEXT: vs8r.v v16, (a0) -; CHECK-NEXT: ret - %cond = and %ma, %mb - %sel = select %cond, %a, zeroinitializer - store %sel, * %out - ret void -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll similarity index 99% rename from llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll index 54682aa..24426b5 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 define <vscale x 1 x half> @vfmerge_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %cond) {
 ; CHECK-LABEL: vfmerge_vv_nxv1f16:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll
deleted file mode 100644
index a3a61b2..0000000
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll
+++ /dev/null
@@ -1,827 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-
-define <vscale x 1 x i8> @vmerge_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv1i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i8> %va, <vscale x 1 x i8> %vb
-  ret <vscale x 1 x i8> %vc
-}
-
-define <vscale x 1 x i8> @vmerge_xv_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv1i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i8> %splat, <vscale x 1 x i8> %va
-  ret <vscale x 1 x i8> %vc
-}
-
-define <vscale x 1 x i8> @vmerge_iv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv1i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 3, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i8> %splat, <vscale x 1 x i8> %va
-  ret <vscale x 1 x i8> %vc
-}
-
-define <vscale x 2 x i8> @vmerge_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv2i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i8> %va, <vscale x 2 x i8> %vb
-  ret <vscale x 2 x i8> %vc
-}
-
-define <vscale x 2 x i8> @vmerge_xv_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv2i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i8> %splat, <vscale x 2 x i8> %va
-  ret <vscale x 2 x i8> %vc
-}
-
-define <vscale x 2 x i8> @vmerge_iv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv2i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 3, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i8> %splat, <vscale x 2 x i8> %va
-  ret <vscale x 2 x i8> %vc
-}
-
-define <vscale x 3 x i8> @vmerge_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv3i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 3 x i1> %cond, <vscale x 3 x i8> %va, <vscale x 3 x i8> %vb
-  ret <vscale x 3 x i8> %vc
-}
-
-define <vscale x 3 x i8> @vmerge_xv_nxv3i8(<vscale x 3 x i8> %va, i8 signext %b, <vscale x 3 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv3i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
-  %splat = shufflevector <vscale x 3 x i8> %head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %vc = select <vscale x 3 x i1> %cond, <vscale x 3 x i8> %splat, <vscale x 3 x i8> %va
-  ret <vscale x 3 x i8> %vc
-}
-
-define <vscale x 3 x i8> @vmerge_iv_nxv3i8(<vscale x 3 x i8>
%va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv3i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i8 3, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_vv_nxv4i8( %va, %vb, %cond) { -; CHECK-LABEL: vmerge_vv_nxv4i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vmerge_xv_nxv4i8( %va, i8 signext %b, %cond) { -; CHECK-LABEL: vmerge_xv_nxv4i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma -; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i8 %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_iv_nxv4i8( %va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv4i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i8 3, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_vv_nxv8i8( %va, %vb, %cond) { -; CHECK-LABEL: vmerge_vv_nxv8i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vmerge_xv_nxv8i8( %va, i8 signext %b, %cond) { -; CHECK-LABEL: vmerge_xv_nxv8i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma -; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i8 %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_iv_nxv8i8( %va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv8i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i8 3, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_vv_nxv16i8( %va, %vb, %cond) { -; CHECK-LABEL: vmerge_vv_nxv16i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vmerge_xv_nxv16i8( %va, i8 signext %b, %cond) { -; CHECK-LABEL: vmerge_xv_nxv16i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma -; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i8 %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_iv_nxv16i8( %va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv16i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i8 3, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_vv_nxv32i8( %va, %vb, %cond) { -; CHECK-LABEL: vmerge_vv_nxv32i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vmerge_xv_nxv32i8( %va, i8 signext %b, %cond) 
{ -; CHECK-LABEL: vmerge_xv_nxv32i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma -; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i8 %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_iv_nxv32i8( %va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv32i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i8 3, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_vv_nxv64i8( %va, %vb, %cond) { -; CHECK-LABEL: vmerge_vv_nxv64i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vmerge_xv_nxv64i8( %va, i8 signext %b, %cond) { -; CHECK-LABEL: vmerge_xv_nxv64i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma -; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i8 %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_iv_nxv64i8( %va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv64i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i8 3, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_vv_nxv1i16( %va, %vb, %cond) { -; CHECK-LABEL: vmerge_vv_nxv1i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vmerge_xv_nxv1i16( %va, i16 signext %b, %cond) { -; CHECK-LABEL: vmerge_xv_nxv1i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma -; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i16 %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_iv_nxv1i16( %va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv1i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i16 3, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_vv_nxv2i16( %va, %vb, %cond) { -; CHECK-LABEL: vmerge_vv_nxv2i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vmerge_xv_nxv2i16( %va, i16 signext %b, %cond) { -; CHECK-LABEL: vmerge_xv_nxv2i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma -; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i16 %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_iv_nxv2i16( %va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv2i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i16 3, i32 0 - %splat = shufflevector %head, poison, 
zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_vv_nxv4i16( %va, %vb, %cond) { -; CHECK-LABEL: vmerge_vv_nxv4i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vmerge_xv_nxv4i16( %va, i16 signext %b, %cond) { -; CHECK-LABEL: vmerge_xv_nxv4i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma -; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i16 %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_iv_nxv4i16( %va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv4i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i16 3, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_vv_nxv8i16( %va, %vb, %cond) { -; CHECK-LABEL: vmerge_vv_nxv8i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vmerge_xv_nxv8i16( %va, i16 signext %b, %cond) { -; CHECK-LABEL: vmerge_xv_nxv8i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma -; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i16 %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_iv_nxv8i16( %va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv8i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i16 3, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_vv_nxv16i16( %va, %vb, %cond) { -; CHECK-LABEL: vmerge_vv_nxv16i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vmerge_xv_nxv16i16( %va, i16 signext %b, %cond) { -; CHECK-LABEL: vmerge_xv_nxv16i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma -; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i16 %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_iv_nxv16i16( %va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv16i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i16 3, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_vv_nxv32i16( %va, %vb, %cond) { -; CHECK-LABEL: vmerge_vv_nxv32i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vmerge_xv_nxv32i16( %va, i16 signext %b, %cond) { -; CHECK-LABEL: vmerge_xv_nxv32i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma -; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i16 %b, i32 0 - 
%splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_iv_nxv32i16( %va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv32i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i16 3, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_vv_nxv1i32( %va, %vb, %cond) { -; CHECK-LABEL: vmerge_vv_nxv1i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vmerge_xv_nxv1i32( %va, i32 %b, %cond) { -; CHECK-LABEL: vmerge_xv_nxv1i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma -; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i32 %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_iv_nxv1i32( %va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv1i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i32 3, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_vv_nxv2i32( %va, %vb, %cond) { -; CHECK-LABEL: vmerge_vv_nxv2i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vmerge_xv_nxv2i32( %va, i32 %b, %cond) { -; CHECK-LABEL: vmerge_xv_nxv2i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma -; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i32 %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_iv_nxv2i32( %va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv2i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i32 3, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_vv_nxv4i32( %va, %vb, %cond) { -; CHECK-LABEL: vmerge_vv_nxv4i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vmerge_xv_nxv4i32( %va, i32 %b, %cond) { -; CHECK-LABEL: vmerge_xv_nxv4i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma -; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i32 %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_iv_nxv4i32( %va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv4i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i32 3, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_vv_nxv8i32( %va, %vb, %cond) { -; CHECK-LABEL: vmerge_vv_nxv8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma -; CHECK-NEXT: 
vmerge.vvm v8, v12, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vmerge_xv_nxv8i32( %va, i32 %b, %cond) { -; CHECK-LABEL: vmerge_xv_nxv8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma -; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i32 %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_iv_nxv8i32( %va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i32 3, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_vv_nxv16i32( %va, %vb, %cond) { -; CHECK-LABEL: vmerge_vv_nxv16i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vmerge_xv_nxv16i32( %va, i32 %b, %cond) { -; CHECK-LABEL: vmerge_xv_nxv16i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma -; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i32 %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_iv_nxv16i32( %va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv16i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i32 3, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_vv_nxv1i64( %va, %vb, %cond) { -; CHECK-LABEL: vmerge_vv_nxv1i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vmerge_xv_nxv1i64( %va, i64 %b, %cond) { -; CHECK-LABEL: vmerge_xv_nxv1i64: -; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v8, (a0), zero, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret - %head = insertelement poison, i64 %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_iv_nxv1i64( %va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv1i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i64 3, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_vv_nxv2i64( %va, %vb, %cond) { -; CHECK-LABEL: vmerge_vv_nxv2i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vmerge_xv_nxv2i64( %va, i64 %b, %cond) { -; CHECK-LABEL: vmerge_xv_nxv2i64: -; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v8, 
(a0), zero, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret - %head = insertelement poison, i64 %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_iv_nxv2i64( %va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv2i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i64 3, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_vv_nxv4i64( %va, %vb, %cond) { -; CHECK-LABEL: vmerge_vv_nxv4i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vmerge_xv_nxv4i64( %va, i64 %b, %cond) { -; CHECK-LABEL: vmerge_xv_nxv4i64: -; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v8, (a0), zero, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret - %head = insertelement poison, i64 %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_iv_nxv4i64( %va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv4i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i64 3, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_vv_nxv8i64( %va, %vb, %cond) { -; CHECK-LABEL: vmerge_vv_nxv8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 -; CHECK-NEXT: ret - %vc = select %cond, %va, %vb - ret %vc -} - -define @vmerge_xv_nxv8i64( %va, i64 %b, %cond) { -; CHECK-LABEL: vmerge_xv_nxv8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; CHECK-NEXT: vlse64.v v8, (a0), zero, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret - %head = insertelement poison, i64 %b, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_iv_nxv8i64( %va, %cond) { -; CHECK-LABEL: vmerge_iv_nxv8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma -; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 -; CHECK-NEXT: ret - %head = insertelement poison, i64 3, i32 0 - %splat = shufflevector %head, poison, zeroinitializer - %vc = select %cond, %splat, %va - ret %vc -} - -define @vmerge_truelhs_nxv8i64_0( %va, %vb) { -; CHECK-LABEL: vmerge_truelhs_nxv8i64_0: -; CHECK: # %bb.0: -; CHECK-NEXT: ret - %mhead = insertelement poison, i1 1, i32 0 - %mtrue = shufflevector %mhead, poison, zeroinitializer - %vc = select %mtrue, %va, %vb - ret %vc -} - -define @vmerge_falselhs_nxv8i64_0( %va, %vb) { -; CHECK-LABEL: vmerge_falselhs_nxv8i64_0: -; CHECK: # %bb.0: -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret - %vc = select zeroinitializer, %va, %vb - ret %vc -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll similarity index 93% rename from 
llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
index bc11386..19c7d59 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
@@ -1,5 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
 define <vscale x 1 x i8> @vmerge_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %cond) {
 ; CHECK-LABEL: vmerge_vv_nxv1i8:
@@ -658,11 +661,23 @@ define <vscale x 1 x i64> @vmerge_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x
 }
 
 define <vscale x 1 x i64> @vmerge_xv_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vmerge_xv_nxv1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v8, (a0), zero, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vmerge_xv_nxv1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; RV64-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV64-NEXT:    ret
   %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
   %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i64> %splat, <vscale x 1 x i64> %va
@@ -692,11 +707,23 @@ define <vscale x 2 x i64> @vmerge_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x
 }
 
 define <vscale x 2 x i64> @vmerge_xv_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vmerge_xv_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v8, (a0), zero, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vmerge_xv_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; RV64-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV64-NEXT:    ret
   %head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
   %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i64> %splat, <vscale x 2 x i64> %va
@@ -726,11 +753,23 @@ define <vscale x 4 x i64> @vmerge_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x
 }
 
 define <vscale x 4 x i64> @vmerge_xv_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vmerge_xv_nxv4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v8, (a0), zero, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vmerge_xv_nxv4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; RV64-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV64-NEXT:    ret
   %head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
   %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i64> %splat, <vscale x 4 x i64> %va
@@ -760,11 +799,23 @@ define <vscale x 8 x i64> @vmerge_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x
 }
 
 define <vscale x 8 x i64> @vmerge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv8i64:
-; CHECK:       # 
%bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 -; CHECK-NEXT: ret +; RV32-LABEL: vmerge_xv_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v8, (a0), zero, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vmerge_xv_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma +; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 +; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = select %cond, %splat, %va diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll deleted file mode 100644 index 59407e8..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll +++ /dev/null @@ -1,2800 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vxor.nxv1i8.nxv1i8( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv1i8.nxv1i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv1i8.nxv1i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv2i8.nxv2i8( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv2i8.nxv2i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv2i8.nxv2i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv4i8.nxv4i8( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv4i8.nxv4i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv4i8.nxv4i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { -; 
CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv8i8.nxv8i8( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv8i8.nxv8i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv8i8.nxv8i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv16i8.nxv16i8( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv16i8.nxv16i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv16i8.nxv16i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv32i8.nxv32i8( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv32i8.nxv32i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv32i8.nxv32i8( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv64i8.nxv64i8( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv64i8.nxv64i8( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv64i8.nxv64i8( - , - , - , - 
, - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv1i16.nxv1i16( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv1i16.nxv1i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv1i16.nxv1i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv2i16.nxv2i16( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv2i16.nxv2i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv2i16.nxv2i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv4i16.nxv4i16( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv4i16.nxv4i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv4i16.nxv4i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv8i16.nxv8i16( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v10 -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv8i16.nxv8i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv8i16.nxv8i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv16i16.nxv16i16( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv16i16.nxv16i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv16i16.nxv16i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv32i16.nxv32i16( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv32i16.nxv32i16( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv32i16.nxv32i16( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv1i32.nxv1i32( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv1i32.nxv1i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv1i32.nxv1i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv2i32.nxv2i32( - , - , - , - i64); - -define 
@intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv2i32.nxv2i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv2i32.nxv2i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv4i32.nxv4i32( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv4i32.nxv4i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv4i32.nxv4i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv8i32.nxv8i32( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv8i32.nxv8i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv8i32.nxv8i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv16i32.nxv16i32( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv16i32.nxv16i32( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv16i32.nxv16i32( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv1i64.nxv1i64( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv1i64.nxv1i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv1i64.nxv1i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv2i64.nxv2i64( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv2i64.nxv2i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv2i64.nxv2i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv4i64.nxv4i64( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv4i64.nxv4i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv4i64.nxv4i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv8i64.nxv8i64( - , - , - , - i64); - -define @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vxor.vv v8, v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv8i64.nxv8i64( - undef, - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv8i64.nxv8i64( - , - , - , - , - i64, - i64); - -define @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64( - %0, - %1, - %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv1i8.i8( - , - , - i8, - i64); - -define @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv1i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv1i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv1i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv2i8.i8( - , - , - i8, - i64); - -define @intrinsic_vxor_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv2i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv2i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv2i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv4i8.i8( - , - , - i8, - i64); - -define @intrinsic_vxor_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv4i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv4i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv4i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv8i8.i8( - , - , - i8, - i64); - -define @intrinsic_vxor_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv8i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv8i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, 
%3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv8i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv16i8.i8( - , - , - i8, - i64); - -define @intrinsic_vxor_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv16i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv16i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vxor.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv16i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv32i8.i8( - , - , - i8, - i64); - -define @intrinsic_vxor_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv32i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv32i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vxor.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv32i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv64i8.i8( - , - , - i8, - i64); - -define @intrinsic_vxor_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv64i8.i8( - undef, - %0, - i8 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv64i8.i8( - , - , - i8, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vxor.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv64i8.i8( - %0, - %1, - i8 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv1i16.i16( - , - , - i16, - i64); - -define @intrinsic_vxor_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv1i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv1i16.i16( - , - , - i16, - , - i64, - i64); - -define 
@intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv1i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv2i16.i16( - , - , - i16, - i64); - -define @intrinsic_vxor_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv2i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv2i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv2i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv4i16.i16( - , - , - i16, - i64); - -define @intrinsic_vxor_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv4i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv4i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv4i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv8i16.i16( - , - , - i16, - i64); - -define @intrinsic_vxor_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv8i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv8i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vxor.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv8i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv16i16.i16( - , - , - i16, - i64); - -define @intrinsic_vxor_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv16i16.i16( - undef, - %0, - i16 %1, - i64 
%2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv16i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vxor.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv16i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv32i16.i16( - , - , - i16, - i64); - -define @intrinsic_vxor_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv32i16.i16( - undef, - %0, - i16 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv32i16.i16( - , - , - i16, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vxor.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv32i16.i16( - %0, - %1, - i16 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv1i32.i32( - , - , - i32, - i64); - -define @intrinsic_vxor_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv1i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv1i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv1i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv2i32.i32( - , - , - i32, - i64); - -define @intrinsic_vxor_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv2i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv2i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv2i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv4i32.i32( - , - , - i32, - i64); - -define @intrinsic_vxor_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: 
vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv4i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv4i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vxor.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv4i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv8i32.i32( - , - , - i32, - i64); - -define @intrinsic_vxor_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv8i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv8i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vxor.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv8i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv16i32.i32( - , - , - i32, - i64); - -define @intrinsic_vxor_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv16i32.i32( - undef, - %0, - i32 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv16i32.i32( - , - , - i32, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vxor.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv16i32.i32( - %0, - %1, - i32 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv1i64.i64( - , - , - i64, - i64); - -define @intrinsic_vxor_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv1i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv1i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv2i64.i64( - , - , - i64, - i64); - -define @intrinsic_vxor_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vxor_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv2i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv2i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vxor.vx v8, v10, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv4i64.i64( - , - , - i64, - i64); - -define @intrinsic_vxor_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv4i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv4i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vxor.vx v8, v12, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -declare @llvm.riscv.vxor.nxv8i64.i64( - , - , - i64, - i64); - -define @intrinsic_vxor_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv8i64.i64( - undef, - %0, - i64 %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vxor.mask.nxv8i64.i64( - , - , - i64, - , - i64, - i64); - -define @intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vxor.vx v8, v16, a0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv8i64.i64( - %0, - %1, - i64 %2, - %3, - i64 %4, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vxor_vi_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vxor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv1i8.i8( - undef, - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv1i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vxor_vi_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vxor.vi v8, v8, 9 -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv2i8.i8( - undef, - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv2i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vxor_vi_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vxor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv4i8.i8( - undef, - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv4i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vxor_vi_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vxor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv8i8.i8( - undef, - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv8i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vxor_vi_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vxor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv16i8.i8( - undef, - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vxor.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv16i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vxor_vi_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vxor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv32i8.i8( - undef, - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vxor.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv32i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vxor_vi_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vxor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv64i8.i8( - undef, - %0, - i8 9, - i64 %1) - - ret %a -} - -define @intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vxor.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv64i8.i8( - %0, - %1, - i8 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vxor_vi_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: vxor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv1i16.i16( - undef, - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv1i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vxor_vi_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: vxor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv2i16.i16( - undef, - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv2i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vxor_vi_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vxor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv4i16.i16( - undef, - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv4i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vxor_vi_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vxor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv8i16.i16( - undef, - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vxor.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = 
call @llvm.riscv.vxor.mask.nxv8i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vxor_vi_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vxor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv16i16.i16( - undef, - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vxor.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv16i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vxor_vi_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: vxor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv32i16.i16( - undef, - %0, - i16 9, - i64 %1) - - ret %a -} - -define @intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vxor.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv32i16.i16( - %0, - %1, - i16 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vxor_vi_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: vxor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv1i32.i32( - undef, - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv1i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vxor_vi_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vxor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv2i32.i32( - undef, - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv2i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vxor_vi_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: vxor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv4i32.i32( - undef, - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) 
nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vxor.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv4i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vxor_vi_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vxor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv8i32.i32( - undef, - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vxor.vi v8, v12, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv8i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vxor_vi_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: vxor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv16i32.i32( - undef, - %0, - i32 9, - i64 %1) - - ret %a -} - -define @intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vxor.vi v8, v16, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv16i32.i32( - %0, - %1, - i32 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vxor_vi_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vxor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv1i64.i64( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv1i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vxor_vi_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vxor.vi v8, v8, 9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.nxv2i64.i64( - undef, - %0, - i64 9, - i64 %1) - - ret %a -} - -define @intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vxor.vi v8, v10, 9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vxor.mask.nxv2i64.i64( - %0, - %1, - i64 9, - %2, - i64 %3, i64 1) - - ret %a -} - -define @intrinsic_vxor_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vxor_vi_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: 
    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vxor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vxor.ll
similarity index 83%
rename from llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vxor.ll
index b83f2ab..3ec594d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -28,10 +30,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,9 +54,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define
@intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -90,7 +92,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -99,9 +101,9 @@ declare @llvm.riscv.vxor.nxv4i8.nxv4i8( , , , - i32); + iXLen); -define @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -112,7 +114,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -122,10 +124,10 @@ declare @llvm.riscv.vxor.mask.nxv4i8.nxv4i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -137,7 +139,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -146,9 +148,9 @@ declare @llvm.riscv.vxor.nxv8i8.nxv8i8( , , , - i32); + iXLen); -define @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -159,7 +161,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -169,10 +171,10 @@ declare @llvm.riscv.vxor.mask.nxv8i8.nxv8i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -184,7 +186,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -193,9 +195,9 @@ declare @llvm.riscv.vxor.nxv16i8.nxv16i8( , , , - i32); + iXLen); -define @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -206,7 +208,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -216,10 +218,10 @@ declare @llvm.riscv.vxor.mask.nxv16i8.nxv16i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -231,7 +233,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -240,9 +242,9 @@ declare @llvm.riscv.vxor.nxv32i8.nxv32i8( , , , - i32); + iXLen); -define @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -253,7 +255,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -263,10 +265,10 @@ declare @llvm.riscv.vxor.mask.nxv32i8.nxv32i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -278,7 +280,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -287,9 +289,9 @@ declare @llvm.riscv.vxor.nxv64i8.nxv64i8( , , , - i32); + iXLen); -define @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -300,7 +302,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -310,10 +312,10 @@ declare @llvm.riscv.vxor.mask.nxv64i8.nxv64i8( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) @@ -326,7 +328,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -335,9 +337,9 @@ declare @llvm.riscv.vxor.nxv1i16.nxv1i16( , , , - i32); + iXLen); -define @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -348,7 +350,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -358,10 +360,10 @@ declare @llvm.riscv.vxor.mask.nxv1i16.nxv1i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -373,7 +375,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -382,9 +384,9 @@ declare @llvm.riscv.vxor.nxv2i16.nxv2i16( , , , - i32); + iXLen); -define @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -395,7 +397,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -405,10 +407,10 @@ declare @llvm.riscv.vxor.mask.nxv2i16.nxv2i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -420,7 +422,7 @@ entry: %1, 
%2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -429,9 +431,9 @@ declare @llvm.riscv.vxor.nxv4i16.nxv4i16( , , , - i32); + iXLen); -define @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -442,7 +444,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -452,10 +454,10 @@ declare @llvm.riscv.vxor.mask.nxv4i16.nxv4i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -467,7 +469,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -476,9 +478,9 @@ declare @llvm.riscv.vxor.nxv8i16.nxv8i16( , , , - i32); + iXLen); -define @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -489,7 +491,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -499,10 +501,10 @@ declare @llvm.riscv.vxor.mask.nxv8i16.nxv8i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -514,7 +516,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -523,9 +525,9 @@ declare @llvm.riscv.vxor.nxv16i16.nxv16i16( , , , - i32); + iXLen); -define @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -536,7 +538,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -546,10 +548,10 @@ declare @llvm.riscv.vxor.mask.nxv16i16.nxv16i16( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -561,7 +563,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -570,9 +572,9 @@ declare @llvm.riscv.vxor.nxv32i16.nxv32i16( , , , - i32); + iXLen); -define @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -583,7 +585,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -593,10 +595,10 @@ declare @llvm.riscv.vxor.mask.nxv32i16.nxv32i16( , , , - i32, - i32); + iXLen, 
+ iXLen); -define @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) @@ -609,7 +611,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -618,9 +620,9 @@ declare @llvm.riscv.vxor.nxv1i32.nxv1i32( , , , - i32); + iXLen); -define @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -631,7 +633,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -641,10 +643,10 @@ declare @llvm.riscv.vxor.mask.nxv1i32.nxv1i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -656,7 +658,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -665,9 +667,9 @@ declare @llvm.riscv.vxor.nxv2i32.nxv2i32( , , , - i32); + iXLen); -define @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -678,7 +680,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -688,10 +690,10 @@ declare @llvm.riscv.vxor.mask.nxv2i32.nxv2i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -703,7 +705,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -712,9 +714,9 @@ declare @llvm.riscv.vxor.nxv4i32.nxv4i32( , , , - i32); + iXLen); -define @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -725,7 +727,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -735,10 +737,10 @@ declare @llvm.riscv.vxor.mask.nxv4i32.nxv4i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -750,7 +752,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -759,9 +761,9 @@ declare @llvm.riscv.vxor.nxv8i32.nxv8i32( , , , - i32); + iXLen); -define @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -772,7 +774,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -782,10 +784,10 @@ declare @llvm.riscv.vxor.mask.nxv8i32.nxv8i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -797,7 +799,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -806,9 +808,9 @@ declare @llvm.riscv.vxor.nxv16i32.nxv16i32( , , , - i32); + iXLen); -define @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -819,7 +821,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -829,10 +831,10 @@ declare @llvm.riscv.vxor.mask.nxv16i32.nxv16i32( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) @@ -845,7 +847,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -854,9 +856,9 @@ declare @llvm.riscv.vxor.nxv1i64.nxv1i64( , , , - i32); + iXLen); -define @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -867,7 +869,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -877,10 +879,10 @@ declare @llvm.riscv.vxor.mask.nxv1i64.nxv1i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -892,7 +894,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -901,9 +903,9 @@ declare @llvm.riscv.vxor.nxv2i64.nxv2i64( , , , - i32); + iXLen); -define @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -914,7 +916,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -924,10 +926,10 @@ declare @llvm.riscv.vxor.mask.nxv2i64.nxv2i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a0, e64, m2, ta, mu @@ -939,7 +941,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -948,9 +950,9 @@ declare @llvm.riscv.vxor.nxv4i64.nxv4i64( , , , - i32); + iXLen); -define @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -961,7 +963,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -971,10 +973,10 @@ declare @llvm.riscv.vxor.mask.nxv4i64.nxv4i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -986,7 +988,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -995,9 +997,9 @@ declare @llvm.riscv.vxor.nxv8i64.nxv8i64( , , , - i32); + iXLen); -define @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -1008,7 +1010,7 @@ entry: undef, %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -1018,10 +1020,10 @@ declare @llvm.riscv.vxor.mask.nxv8i64.nxv8i64( , , , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) @@ -1034,7 +1036,7 @@ entry: %1, %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1043,9 +1045,9 @@ declare @llvm.riscv.vxor.nxv1i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma @@ -1056,7 +1058,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1066,10 +1068,10 @@ declare @llvm.riscv.vxor.mask.nxv1i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu @@ -1081,7 +1083,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1090,9 +1092,9 @@ declare @llvm.riscv.vxor.nxv2i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vxor_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vxor_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma @@ -1103,7 +1105,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1113,10 +1115,10 @@ declare @llvm.riscv.vxor.mask.nxv2i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu @@ -1128,7 +1130,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1137,9 +1139,9 @@ declare @llvm.riscv.vxor.nxv4i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vxor_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vxor_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma @@ -1150,7 +1152,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1160,10 +1162,10 @@ declare @llvm.riscv.vxor.mask.nxv4i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu @@ -1175,7 +1177,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1184,9 +1186,9 @@ declare @llvm.riscv.vxor.nxv8i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vxor_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vxor_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma @@ -1197,7 +1199,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1207,10 +1209,10 @@ declare @llvm.riscv.vxor.mask.nxv8i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu @@ -1222,7 +1224,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1231,9 +1233,9 @@ declare @llvm.riscv.vxor.nxv16i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vxor_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vxor_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma @@ -1244,7 +1246,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1254,10 +1256,10 @@ declare @llvm.riscv.vxor.mask.nxv16i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu @@ -1269,7 +1271,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1278,9 +1280,9 @@ declare @llvm.riscv.vxor.nxv32i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vxor_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vxor_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: 
# %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma @@ -1291,7 +1293,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1301,10 +1303,10 @@ declare @llvm.riscv.vxor.mask.nxv32i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu @@ -1316,7 +1318,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1325,9 +1327,9 @@ declare @llvm.riscv.vxor.nxv64i8.i8( , , i8, - i32); + iXLen); -define @intrinsic_vxor_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +define @intrinsic_vxor_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma @@ -1338,7 +1340,7 @@ entry: undef, %0, i8 %1, - i32 %2) + iXLen %2) ret %a } @@ -1348,10 +1350,10 @@ declare @llvm.riscv.vxor.mask.nxv64i8.i8( , i8, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu @@ -1363,7 +1365,7 @@ entry: %1, i8 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1372,9 +1374,9 @@ declare @llvm.riscv.vxor.nxv1i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vxor_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vxor_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma @@ -1385,7 +1387,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1395,10 +1397,10 @@ declare @llvm.riscv.vxor.mask.nxv1i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu @@ -1410,7 +1412,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1419,9 +1421,9 @@ declare @llvm.riscv.vxor.nxv2i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vxor_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vxor_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma @@ -1432,7 +1434,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1442,10 +1444,10 @@ declare @llvm.riscv.vxor.mask.nxv2i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu @@ -1457,7 +1459,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) 
ret %a } @@ -1466,9 +1468,9 @@ declare @llvm.riscv.vxor.nxv4i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vxor_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vxor_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma @@ -1479,7 +1481,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1489,10 +1491,10 @@ declare @llvm.riscv.vxor.mask.nxv4i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -1504,7 +1506,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1513,9 +1515,9 @@ declare @llvm.riscv.vxor.nxv8i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vxor_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vxor_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma @@ -1526,7 +1528,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1536,10 +1538,10 @@ declare @llvm.riscv.vxor.mask.nxv8i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -1551,7 +1553,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1560,9 +1562,9 @@ declare @llvm.riscv.vxor.nxv16i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vxor_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vxor_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma @@ -1573,7 +1575,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1583,10 +1585,10 @@ declare @llvm.riscv.vxor.mask.nxv16i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu @@ -1598,7 +1600,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1607,9 +1609,9 @@ declare @llvm.riscv.vxor.nxv32i16.i16( , , i16, - i32); + iXLen); -define @intrinsic_vxor_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +define @intrinsic_vxor_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma @@ -1620,7 +1622,7 @@ entry: undef, %0, i16 %1, - i32 %2) + iXLen %2) ret %a } @@ -1630,10 +1632,10 @@ declare @llvm.riscv.vxor.mask.nxv32i16.i16( , i16, , - i32, - i32); + iXLen, + iXLen); -define 
@intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu @@ -1645,7 +1647,7 @@ entry: %1, i16 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1654,9 +1656,9 @@ declare @llvm.riscv.vxor.nxv1i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vxor_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vxor_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma @@ -1667,7 +1669,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1677,10 +1679,10 @@ declare @llvm.riscv.vxor.mask.nxv1i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu @@ -1692,7 +1694,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1701,9 +1703,9 @@ declare @llvm.riscv.vxor.nxv2i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vxor_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vxor_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -1714,7 +1716,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1724,10 +1726,10 @@ declare @llvm.riscv.vxor.mask.nxv2i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu @@ -1739,7 +1741,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1748,9 +1750,9 @@ declare @llvm.riscv.vxor.nxv4i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vxor_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vxor_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma @@ -1761,7 +1763,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1771,10 +1773,10 @@ declare @llvm.riscv.vxor.mask.nxv4i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1786,7 +1788,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1795,9 +1797,9 @@ declare @llvm.riscv.vxor.nxv8i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vxor_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +define 
@intrinsic_vxor_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma @@ -1808,7 +1810,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1818,10 +1820,10 @@ declare @llvm.riscv.vxor.mask.nxv8i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1833,7 +1835,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1842,9 +1844,9 @@ declare @llvm.riscv.vxor.nxv16i32.i32( , , i32, - i32); + iXLen); -define @intrinsic_vxor_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { +define @intrinsic_vxor_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma @@ -1855,7 +1857,7 @@ entry: undef, %0, i32 %1, - i32 %2) + iXLen %2) ret %a } @@ -1865,10 +1867,10 @@ declare @llvm.riscv.vxor.mask.nxv16i32.i32( , i32, , - i32, - i32); + iXLen, + iXLen); -define @intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +define @intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu @@ -1880,7 +1882,7 @@ entry: %1, i32 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1889,26 +1891,32 @@ declare @llvm.riscv.vxor.nxv1i64.i64( , , i64, - i32); - -define @intrinsic_vxor_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vxor.vv v8, v8, v9 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vxor_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vxor_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vxor.vv v8, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vxor_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vxor.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv1i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1918,28 +1926,34 @@ declare @llvm.riscv.vxor.mask.nxv1i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vlse64.v v10, (a0), zero -; 
CHECK-NEXT: vxor.vv v8, v9, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vxor.vv v8, v9, v10, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vxor.vx v8, v9, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vxor.mask.nxv1i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -1948,26 +1962,32 @@ declare @llvm.riscv.vxor.nxv2i64.i64( , , i64, - i32); - -define @intrinsic_vxor_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vxor.vv v8, v8, v10 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vxor_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vxor_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vxor.vv v8, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vxor_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vxor.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv2i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -1977,28 +1997,34 @@ declare @llvm.riscv.vxor.mask.nxv2i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vxor.vv v8, v10, v12, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vxor.vx v8, v10, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vxor.mask.nxv2i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ 
-2007,26 +2033,32 @@ declare @llvm.riscv.vxor.nxv4i64.i64( , , i64, - i32); - -define @intrinsic_vxor_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vxor.vv v8, v8, v12 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vxor_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vxor_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vxor.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vxor_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vxor.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv4i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -2036,28 +2068,34 @@ declare @llvm.riscv.vxor.mask.nxv4i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vxor.vv v8, v12, v16, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vxor.vx v8, v12, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vxor.mask.nxv4i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } @@ -2066,26 +2104,32 @@ declare @llvm.riscv.vxor.nxv8i64.i64( , , i64, - i32); - -define @intrinsic_vxor_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vxor_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vxor.vv v8, v8, v16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen); + +define @intrinsic_vxor_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vxor_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, 
(a0), zero +; RV32-NEXT: vxor.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vxor_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vxor.vx v8, v8, a0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vxor.nxv8i64.i64( undef, %0, i64 %1, - i32 %2) + iXLen %2) ret %a } @@ -2095,33 +2139,39 @@ declare @llvm.riscv.vxor.mask.nxv8i64.i64( , i64, , - i32, - i32); - -define @intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret + iXLen, + iXLen); + +define @intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vxor.vv v8, v16, v24, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vxor.vx v8, v16, a0, v0.t +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vxor.mask.nxv8i64.i64( %0, %1, i64 %2, %3, - i32 %4, i32 1) + iXLen %4, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv1i8_nxv1i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -2132,12 +2182,12 @@ entry: undef, %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -2149,12 +2199,12 @@ entry: %1, i8 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv2i8_nxv2i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -2165,12 +2215,12 @@ entry: undef, %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -2182,12 +2232,12 @@ entry: %1, i8 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv4i8_nxv4i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv4i8_nxv4i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -2198,12 +2248,12 @@ entry: undef, %0, i8 9, - i32 %1) + iXLen %1) ret %a } 
-define @intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -2215,12 +2265,12 @@ entry: %1, i8 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv8i8_nxv8i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -2231,12 +2281,12 @@ entry: undef, %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -2248,12 +2298,12 @@ entry: %1, i8 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv16i8_nxv16i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -2264,12 +2314,12 @@ entry: undef, %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -2281,12 +2331,12 @@ entry: %1, i8 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv32i8_nxv32i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -2297,12 +2347,12 @@ entry: undef, %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -2314,12 +2364,12 @@ entry: %1, i8 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv64i8_nxv64i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -2330,12 +2380,12 @@ entry: undef, %0, i8 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu @@ -2347,12 +2397,12 @@ entry: %1, i8 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv1i16_nxv1i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma @@ -2363,12 +2413,12 @@ entry: undef, %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -2380,12 +2430,12 @@ entry: %1, i16 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv2i16_nxv2i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma @@ -2396,12 +2446,12 @@ entry: undef, %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -2413,12 +2463,12 @@ entry: %1, i16 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv4i16_nxv4i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma @@ -2429,12 +2479,12 @@ entry: undef, %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -2446,12 +2496,12 @@ entry: %1, i16 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv8i16_nxv8i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma @@ -2462,12 +2512,12 @@ entry: undef, %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -2479,12 +2529,12 @@ entry: %1, i16 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv16i16_nxv16i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma @@ -2495,12 +2545,12 @@ entry: undef, %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -2512,12 +2562,12 @@ entry: %1, i16 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } 
-define @intrinsic_vxor_vi_nxv32i16_nxv32i16_i16( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv32i16_nxv32i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma @@ -2528,12 +2578,12 @@ entry: undef, %0, i16 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -2545,12 +2595,12 @@ entry: %1, i16 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv1i32_nxv1i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma @@ -2561,12 +2611,12 @@ entry: undef, %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -2578,12 +2628,12 @@ entry: %1, i32 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv2i32_nxv2i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma @@ -2594,12 +2644,12 @@ entry: undef, %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -2611,12 +2661,12 @@ entry: %1, i32 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv4i32_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -2627,12 +2677,12 @@ entry: undef, %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -2644,12 +2694,12 @@ entry: %1, i32 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv8i32_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -2660,12 +2710,12 @@ entry: undef, %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -2677,12 +2727,12 @@ entry: %1, i32 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv16i32_nxv16i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -2693,12 +2743,12 @@ entry: undef, %0, i32 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -2710,12 +2760,12 @@ entry: %1, i32 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv1i64_nxv1i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv1i64_nxv1i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma @@ -2726,12 +2776,12 @@ entry: undef, %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -2743,12 +2793,12 @@ entry: %1, i64 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv2i64_nxv2i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv2i64_nxv2i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma @@ -2759,12 +2809,12 @@ entry: undef, %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -2776,12 +2826,12 @@ entry: %1, i64 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv4i64_nxv4i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv4i64_nxv4i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma @@ -2792,12 +2842,12 @@ entry: undef, %0, i64 9, - i32 %1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -2809,12 +2859,12 @@ entry: %1, i64 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -define @intrinsic_vxor_vi_nxv8i64_nxv8i64_i64( %0, i32 %1) nounwind { +define @intrinsic_vxor_vi_nxv8i64_nxv8i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma @@ -2825,12 +2875,12 @@ entry: undef, %0, i64 9, - i32 
%1) + iXLen %1) ret %a } -define @intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -2842,7 +2892,7 @@ entry: %1, i64 9, %2, - i32 %3, i32 1) + iXLen %3, iXLen 1) ret %a } -- 2.7.4
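
For context on the pattern this diff applies throughout: the merged tests replace the XLEN-sized VL and policy operands (i32 on rv32, i64 on rv64) with the placeholder type iXLen, so a single .ll file can drive both targets. The sketch below illustrates that convention for one vxor test; the RUN lines, triples, and attribute strings are assumed for illustration and are not quoted from this patch.

; Illustrative sketch only -- the RUN lines below are an assumed example of the
; convention, not copied from this patch.  'sed' rewrites the iXLen placeholder
; to the target's XLEN type before llc runs, so one file covers both rv32 and
; rv64.  Output shared by both targets uses the CHECK prefix; output that
; differs uses the RV32/RV64 prefixes.
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    iXLen %2)
  ret <vscale x 1 x i8> %a
}

Only the i64 scalar (vx) cases still need separate RV32-/RV64-prefixed check lines: rv32 has no 64-bit GPR, so the scalar is stored to the stack and splatted with vlse64.v before a vxor.vv, while rv64 emits vxor.vx directly, as the hunks above show.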