From 09468a914827b33fe6fe1fcf05ba87c60709d49d Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper@sifive.com>
Date: Fri, 18 Dec 2020 11:22:43 -0800
Subject: [PATCH] [RISCV] Sign extend constant arguments to V intrinsics when
 promoting to XLen.

The default behavior for any_extend of a constant is to zero extend.
This occurs inside of getNode rather than allowing type legalization to
promote the constant, which would sign extend. By using sign extend with
getNode, the constant will be sign extended. This gives a better chance
for isel to find a simm5 immediate, since all xlen bits are examined
there.

For instructions that use a uimm5 immediate, this change only affects
constants >= 128 for i8 or >= 32768 for i16. Constants that large
already wouldn't have been eligible for uimm5 and would need to use a
scalar register.

If the instruction isn't able to use simm5 or the immediate is too
large, we'll need to materialize the immediate in a register. As far as
I know, constants with all 1s in the upper bits should materialize as
well as or better than all 0s.

Longer term we should probably have a SEW-aware PatFrag to ignore the
bits above SEW before checking simm5.

I updated about half the test cases in some tests to use a negative
constant to get coverage for this.

Reviewed By: evandro

Differential Revision: https://reviews.llvm.org/D93487
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp        | 21 ++++--
 llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll           | 36 +++++-----
 llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll           | 44 ++++++------
 llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll           | 76 +++++++++++-----------
 llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll          | 36 +++++-----
 llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll          | 44 ++++++------
 llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll | 36 +++++-----
 llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll          | 72 ++++++++++----------
 8 files changed, 188 insertions(+), 177 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a484669..5334666 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1045,8 +1045,13 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     EVT OpVT = ScalarOp.getValueType();
     if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
         (OpVT == MVT::i32 && Subtarget.is64Bit())) {
-      ScalarOp =
-          DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), ScalarOp);
+      // If the operand is a constant, sign extend to increase our chances
+      // of being able to use a .vi instruction. ANY_EXTEND would become a
+      // zero extend and the simm5 check in isel would fail.
+      // FIXME: Should we ignore the upper bits in isel instead?
+      unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND
+                                                      : ISD::ANY_EXTEND;
+      ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp);
       return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
                          Operands);
     }
@@ -1087,9 +1092,15 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
     EVT OpVT = ScalarOp.getValueType();
     if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
         (OpVT == MVT::i32 && Subtarget.is64Bit())) {
-      ScalarOp =
-          DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), ScalarOp);
-      return DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, Op->getVTList(), Operands);
+      // If the operand is a constant, sign extend to increase our chances
+      // of being able to use a .vi instruction. ANY_EXTEND would become a
+      // zero extend and the simm5 check in isel would fail.
+      // FIXME: Should we ignore the upper bits in isel instead?
+      unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND
+                                                      : ISD::ANY_EXTEND;
+      ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp);
+      return DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, Op->getVTList(),
+                         Operands);
     }
   }
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
index 0557e58..b4861b3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
@@ -724,10 +724,10 @@ define <vscale x 1 x i8> @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i8_nxv1i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 1 x i1> %1,
     i32 %2)

@@ -752,10 +752,10 @@ define <vscale x 4 x i8> @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i8_nxv4i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 4 x i1> %1,
     i32 %2)

@@ -780,10 +780,10 @@ define <vscale x 16 x i8> @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8(<vscale x 16 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv16i8_nxv16i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 16 x i1> %1,
     i32 %2)

@@ -808,10 +808,10 @@ define <vscale x 64 x i8> @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8(<vscale x 64 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv64i8_nxv64i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 64 x i1> %1,
     i32 %2)

@@ -836,10 +836,10 @@ define <vscale x 2 x i16> @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16(<vscale x 2 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i16_nxv2i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 2 x i1> %1,
     i32 %2)

@@ -864,10 +864,10 @@ define <vscale x 8 x i16> @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16(<vscale x 8 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i16_nxv8i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 8 x i1> %1,
     i32 %2)

@@ -892,10 +892,10 @@ define <vscale x 32 x i16> @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16(<vscale x 32 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv32i16_nxv32i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 32 x i1> %1,
     i32 %2)

@@ -920,10 +920,10 @@ define <vscale x 2 x i32> @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32(<vscale x 2 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i32_nxv2i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
-    i32 9,
+    i32 -9,
     <vscale x 2 x i1> %1,
     i32 %2)

@@ -948,10 +948,10 @@ define <vscale x 8 x i32> @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32(<vscale x 8 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i32_nxv8i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
-    i32 9,
+    i32 -9,
     <vscale x 8 x i1> %1,
     i32 %2)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
index 4c3e9a3..7e546bf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
@@ -898,10 +898,10 @@ define <vscale x 2 x i8> @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i8_nxv2i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 2 x i1> %1,
     i64 %2)

@@ -926,10 +926,10 @@ define <vscale x 8 x i8> @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i8_nxv8i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 8 x i1> %1,
     i64 %2)

@@ -954,10 +954,10 @@ define <vscale x 32 x i8> @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8(<vscale x 32 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv32i8_nxv32i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 32 x i1> %1,
     i64 %2)

@@ -982,10 +982,10 @@ define <vscale x 1 x i16> @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16(<vscale x 1 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i16_nxv1i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 1 x i1> %1,
     i64 %2)

@@ -1010,10 +1010,10 @@ define <vscale x 4 x i16> @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16(<vscale x 4 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i16_nxv4i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 4 x i1> %1,
     i64 %2)

@@ -1038,10 +1038,10 @@ define <vscale x 16 x i16> @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16(<vscale x 16 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv16i16_nxv16i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 16 x i1> %1,
     i64 %2)
@@ -1066,10 +1066,10 @@ define <vscale x 1 x i32> @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32(<vscale x 1 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i32_nxv1i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
-    i32 9,
+    i32 -9,
     <vscale x 1 x i1> %1,
     i64 %2)

@@ -1094,10 +1094,10 @@ define <vscale x 4 x i32> @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32(<vscale x 4 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i32_nxv4i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
-    i32 9,
+    i32 -9,
     <vscale x 4 x i1> %1,
     i64 %2)

@@ -1122,10 +1122,10 @@ define <vscale x 16 x i32> @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32(<vscale x 16 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv16i32_nxv16i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
-    i32 9,
+    i32 -9,
     <vscale x 16 x i1> %1,
     i64 %2)

@@ -1150,10 +1150,10 @@ define <vscale x 2 x i64> @intrinsic_vadc_vim_nxv2i64_nxv2i64_i64(<vscale x 2 x i64>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i64_nxv2i64_i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
-    i64 9,
+    i64 -9,
     <vscale x 2 x i1> %1,
     i64 %2)

@@ -1178,10 +1178,10 @@ define <vscale x 8 x i64> @intrinsic_vadc_vim_nxv8i64_nxv8i64_i64(<vscale x 8 x i64>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i64_nxv8i64_i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
-    i64 9,
+    i64 -9,
     <vscale x 8 x i1> %1,
     i64 %2)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
index 59a5f9f..756bba5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
@@ -1457,11 +1457,11 @@ define <vscale x 1 x i8> @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 1 x i1> %2,
     i32 %3)

@@ -1485,11 +1485,11 @@ define <vscale x 2 x i8> @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 2 x i1> %2,
     i32 %3)

@@ -1513,11 +1513,11 @@ define <vscale x 4 x i8> @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 4 x i1> %2,
     i32 %3)

@@ -1541,11 +1541,11 @@ define <vscale x 8 x i8> @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 8 x i1> %2,
     i32 %3)

@@ -1569,11 +1569,11 @@ define <vscale x 16 x i8> @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 16 x i1> %2,
     i32 %3)

@@ -1597,11 +1597,11 @@ define <vscale x 32 x i8> @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 32 x i1> %2,
     i32 %3)

@@ -1612,10 +1612,10 @@ define <vscale x 64 x i8> @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vadd_vi_nxv64i8_nxv64i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
-    i8 9,
+    i8 -9,
     i32 %1)

   ret <vscale x 64 x i8> %a
@@ -1625,11 +1625,11 @@ define <vscale x 64 x i8> @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 64 x i1> %2,
     i32 %3)

@@ -1653,11 +1653,11 @@ define <vscale x 1 x i16> @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 1 x i1> %2,
     i32 %3)

@@ -1681,11 +1681,11 @@ define <vscale x 2 x i16> @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 2 x i1> %2,
     i32 %3)

@@ -1709,11 +1709,11 @@ define <vscale x 4 x i16> @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 4 x i1> %2,
     i32 %3)

@@ -1737,11 +1737,11 @@ define <vscale x 8 x i16> @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 8 x i1> %2,
     i32 %3)

@@ -1765,11 +1765,11 @@ define <vscale x 16 x i16> @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 16 x i1> %2,
     i32 %3)

@@ -1793,11 +1793,11 @@ define <vscale x 32 x i16> @intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 32 x i1> %2,
     i32 %3)

@@ -1821,11 +1821,11 @@ define <vscale x 1 x i32> @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 9,
+    i32 -9,
     <vscale x 1 x i1> %2,
     i32 %3)

@@ -1849,11 +1849,11 @@ define <vscale x 2 x i32> @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 9,
+    i32 -9,
     <vscale x 2 x i1> %2,
     i32 %3)

@@ -1877,11 +1877,11 @@ define <vscale x 4 x i32> @intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 9,
+    i32 -9,
     <vscale x 4 x i1> %2,
     i32 %3)

@@ -1905,11 +1905,11 @@ define <vscale x 8 x i32> @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 9,
+    i32 -9,
     <vscale x 8 x i1> %2,
     i32 %3)

@@ -1933,11 +1933,11 @@ define <vscale x 16 x i32> @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 9,
+    i32 -9,
     <vscale x 16 x i1> %2,
     i32 %3)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
index 22efd35..63218bf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
@@ -665,10 +665,10 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8(<vscale x 2 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
     <vscale x 2 x i8> %0,
-    i8 9,
+    i8 -9,
     i32 %1)

   ret <vscale x 2 x i1> %a
@@ -691,10 +691,10 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8(<vscale x 8 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
     <vscale x 8 x i8> %0,
-    i8 9,
+    i8 -9,
     i32 %1)

   ret <vscale x 8 x i1> %a
@@ -717,10 +717,10 @@ define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8(<vscale x 32 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
     <vscale x 32 x i8> %0,
-    i8 9,
+    i8 -9,
     i32 %1)

   ret <vscale x 32 x i1> %a
@@ -743,10 +743,10 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16(<vscale x 1 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
     <vscale x 1 x i16> %0,
-    i16 9,
+    i16 -9,
     i32 %1)

   ret <vscale x 1 x i1> %a
@@ -769,10 +769,10 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16(<vscale x 4 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
     <vscale x 4 x i16> %0,
-    i16 9,
+    i16 -9,
     i32 %1)

   ret <vscale x 4 x i1> %a
@@ -795,10 +795,10 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16(<vscale x 16 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
     <vscale x 16 x i16> %0,
-    i16 9,
+    i16 -9,
     i32 %1)

   ret <vscale x 16 x i1> %a
@@ -821,10 +821,10 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32(<vscale x 1 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
     <vscale x 1 x i32> %0,
-    i32 9,
+    i32 -9,
     i32 %1)

   ret <vscale x 1 x i1> %a
@@ -847,10 +847,10 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32(<vscale x 4 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
     <vscale x 4 x i32> %0,
-    i32 9,
+    i32 -9,
     i32 %1)

   ret <vscale x 4 x i1> %a
@@ -873,10 +873,10 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32(<vscale x 16 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
     <vscale x 16 x i32> %0,
-    i32 9,
+    i32 -9,
     i32 %1)

   ret <vscale x 16 x i1> %a
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
index 9b71d84..1b6c8eb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
@@ -809,10 +809,10 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8(<vscale x 2 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
     <vscale x 2 x i8> %0,
-    i8 9,
+    i8 -9,
     i64 %1)

   ret <vscale x 2 x i1> %a
@@ -835,10 +835,10 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8(<vscale x 8 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
     <vscale x 8 x i8> %0,
-    i8 9,
+    i8 -9,
     i64 %1)

   ret <vscale x 8 x i1> %a
@@ -861,10 +861,10 @@ define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8(<vscale x 32 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
     <vscale x 32 x i8> %0,
-    i8 9,
+    i8 -9,
     i64 %1)

   ret <vscale x 32 x i1> %a
@@ -887,10 +887,10 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16(<vscale x 1 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
     <vscale x 1 x i16> %0,
-    i16 9,
+    i16 -9,
     i64 %1)

   ret <vscale x 1 x i1> %a
@@ -913,10 +913,10 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16(<vscale x 4 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
     <vscale x 4 x i16> %0,
-    i16 9,
+    i16 -9,
     i64 %1)

   ret <vscale x 4 x i1> %a
@@ -939,10 +939,10 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16(<vscale x 16 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
     <vscale x 16 x i16> %0,
-    i16 9,
+    i16 -9,
     i64 %1)

   ret <vscale x 16 x i1> %a
@@ -965,10 +965,10 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32(<vscale x 1 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
     <vscale x 1 x i32> %0,
-    i32 9,
+    i32 -9,
     i64 %1)

   ret <vscale x 1 x i1> %a
@@ -991,10 +991,10 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32(<vscale x 4 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
     <vscale x 4 x i32> %0,
-    i32 9,
+    i32 -9,
     i64 %1)

   ret <vscale x 4 x i1> %a
@@ -1017,10 +1017,10 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32(<vscale x 16 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
     <vscale x 16 x i32> %0,
-    i32 9,
+    i32 -9,
     i64 %1)

   ret <vscale x 16 x i1> %a
@@ -1043,10 +1043,10 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64(<vscale x 2 x i64>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i64(
     <vscale x 2 x i64> %0,
-    i64 9,
+    i64 -9,
     i64 %1)

   ret <vscale x 2 x i1> %a
@@ -1069,10 +1069,10 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64(<vscale x 8 x i64>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i64(
     <vscale x 8 x i64> %0,
-    i64 9,
+    i64 -9,
     i64 %1)

   ret <vscale x 8 x i1> %a
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
index 22cd8af..3dbdf04 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
@@ -738,10 +738,10 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8(<vscale x 2 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
     <vscale x 2 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 2 x i1> %1,
     i32 %2)

@@ -766,10 +766,10 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8(<vscale x 8 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
     <vscale x 8 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 8 x i1> %1,
     i32 %2)

@@ -794,10 +794,10 @@ define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8(<vscale x 32 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
     <vscale x 32 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 32 x i1> %1,
     i32 %2)

@@ -822,10 +822,10 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16(<vscale x 1 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
     <vscale x 1 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 1 x i1> %1,
     i32 %2)

@@ -850,10 +850,10 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16(<vscale x 4 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
     <vscale x 4 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 4 x i1> %1,
     i32 %2)

@@ -878,10 +878,10 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16(<vscale x 16 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
     <vscale x 16 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 16 x i1> %1,
     i32 %2)

@@ -906,10 +906,10 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32(<vscale x 1 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
     <vscale x 1 x i32> %0,
-    i32 9,
+    i32 -9,
     <vscale x 1 x i1> %1,
     i32 %2)

@@ -934,10 +934,10 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32(<vscale x 4 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
     <vscale x 4 x i32> %0,
-    i32 9,
+    i32 -9,
     <vscale x 4 x i1> %1,
     i32 %2)

@@ -962,10 +962,10 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32(<vscale x 16 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
     <vscale x 16 x i32> %0,
-    i32 9,
+    i32 -9,
     <vscale x 16 x i1> %1,
     i32 %2)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
index b5d3a53..108e4d5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
@@ -737,11 +737,11 @@ define <vscale x 1 x i8> @intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 1 x i1> %2,
     i32 %3)

@@ -765,11 +765,11 @@ define <vscale x 2 x i8> @intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 2 x i1> %2,
     i32 %3)

@@ -793,11 +793,11 @@ define <vscale x 4 x i8> @intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 4 x i1> %2,
     i32 %3)

@@ -821,11 +821,11 @@ define <vscale x 8 x i8> @intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 8 x i1> %2,
     i32 %3)

@@ -849,11 +849,11 @@ define <vscale x 16 x i8> @intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 16 x i1> %2,
     i32 %3)

@@ -877,11 +877,11 @@ define <vscale x 32 x i8> @intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 32 x i1> %2,
     i32 %3)

@@ -905,11 +905,11 @@ define <vscale x 64 x i8> @intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 64 x i1> %2,
     i32 %3)

@@ -933,11 +933,11 @@ define <vscale x 1 x i16> @intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 1 x i1> %2,
     i32 %3)

@@ -961,11 +961,11 @@ define <vscale x 2 x i16> @intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 2 x i1> %2,
     i32 %3)

@@ -989,11 +989,11 @@ define <vscale x 4 x i16> @intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 4 x i1> %2,
     i32 %3)

@@ -1017,11 +1017,11 @@ define <vscale x 8 x i16> @intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 8 x i1> %2,
     i32 %3)

@@ -1045,11 +1045,11 @@ define <vscale x 16 x i16> @intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 16 x i1> %2,
     i32 %3)

@@ -1073,11 +1073,11 @@ define <vscale x 32 x i16> @intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16>
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 32 x i1> %2,
     i32 %3)

@@ -1101,11 +1101,11 @@ define <vscale x 1 x i32> @intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 9,
+    i32 -9,
     <vscale x 1 x i1> %2,
     i32 %3)

@@ -1129,11 +1129,11 @@ define <vscale x 2 x i32> @intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 9,
+    i32 -9,
     <vscale x 2 x i1> %2,
     i32 %3)

@@ -1157,11 +1157,11 @@ define <vscale x 4 x i32> @intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 9,
+    i32 -9,
     <vscale x 4 x i1> %2,
     i32 %3)

@@ -1185,11 +1185,11 @@ define <vscale x 8 x i32> @intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 9,
+    i32 -9,
     <vscale x 8 x i1> %2,
     i32 %3)

@@ -1213,11 +1213,11 @@ define <vscale x 16 x i32> @intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32>
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 9,
+    i32 -9,
     <vscale x 16 x i1> %2,
     i32 %3)
--
2.7.4
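
Editor's note: to see the payoff of the change in isolation, a minimal standalone test in the style of the files above is sketched here. This is illustrative only and not part of the patch; the function name vadd_vi_negative and its CHECK lines are hypothetical, modeled on the intrinsic_vadd_vi_* tests, and assume an RV32 target with the V extension enabled.

; Illustrative sketch, not a file from this patch. With the sign-extension
; change, i8 -9 reaches isel with all ones in the upper XLEN bits, matches
; simm5 (range -16..15), and selects vadd.vi. A zero extend would have
; produced 247, failed the simm5 check, and forced the constant into a
; scalar register.
declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  i32)

define <vscale x 1 x i8> @vadd_vi_negative(<vscale x 1 x i8> %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: vadd_vi_negative
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 -9,
    i32 %1)
  ret <vscale x 1 x i8> %a
}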