The default behavior for an any_extend of a constant is to zero extend. This happens inside getNode rather than being left to type legalization, which would promote the constant with a sign extend. By asking getNode for a sign extend directly, the constant is sign extended up front. This gives isel a better chance of matching a simm5 immediate, since all xlen bits are examined there.
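For a concrete example, consider an i8 constant of -9 (a hand-worked sketch, not code from this patch; isInt<5> is the existing helper from llvm/Support/MathExtras.h):

  #include "llvm/Support/MathExtras.h"
  #include <cstdint>
  using namespace llvm;

  // An i8 constant -9 after the two possible extensions to a 64-bit xlen.
  int64_t SExt = int64_t(int8_t(-9));  // 0xfffffffffffffff7 == -9
  int64_t ZExt = int64_t(uint8_t(-9)); // 0x00000000000000f7 == 247
  bool SExtIsSimm5 = isInt<5>(SExt);   // true: -9 is in [-16, 15]
  bool ZExtIsSimm5 = isInt<5>(ZExt);   // false: 247 is not

With the zero-extended form, the simm5 check rejects the constant even though the low 8 bits are a perfectly good -9.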
For instructions that use a uimm5 immediate, this change only affects i8 constants >= 128 and i16 constants >= 32768, i.e. constants with the sign bit set. Constants that large already wouldn't have been eligible for uimm5 and would need to use a scalar register.
If the instruction isn't able to use simm5, or the immediate is too large, we'll need to materialize the immediate in a register. As far as I know, constants with all 1s in the upper bits should materialize as well as or better than constants with all 0s; for example, on RV64 a sign-extended i16 -9 is a single addi, while the zero-extended 0xfff7 needs a lui+addi pair.
Longer term we should probably have a SEW aware PatFrag to ignore
the bits above SEW before checking simm5.
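Such a check might look something like this (a hypothetical sketch, not part of this patch; it reuses SignExtend64 and isInt<5> from llvm/Support/MathExtras.h, and the function name is made up; the real fix would live in a TableGen PatFrag):

  #include "llvm/Support/MathExtras.h"
  #include <cstdint>

  // Sign extend from the element width before the simm5 test so that bits
  // above SEW are ignored; an i8 -9 zero extended to 0xf7 would still match.
  static bool isSimm5IgnoringBitsAboveSEW(int64_t Imm, unsigned SEW) {
    return llvm::isInt<5>(llvm::SignExtend64(Imm, SEW));
  }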
I updated about half the test cases in some tests to use a negative
constant to get coverage for this.
Reviewed By: evandro
Differential Revision: https://reviews.llvm.org/D93487
EVT OpVT = ScalarOp.getValueType();
if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
(OpVT == MVT::i32 && Subtarget.is64Bit())) {
- ScalarOp =
- DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), ScalarOp);
+ // If the operand is a constant, sign extend to increase our chances
+ // of being able to use a .vi instruction. ANY_EXTEND would become a
+ // zero extend and the simm5 check in isel would fail.
+ // FIXME: Should we ignore the upper bits in isel instead?
+ unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND
+ : ISD::ANY_EXTEND;
+ ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp);
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
Operands);
}
EVT OpVT = ScalarOp.getValueType();
if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
(OpVT == MVT::i32 && Subtarget.is64Bit())) {
- ScalarOp =
- DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), ScalarOp);
- return DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, Op->getVTList(), Operands);
+ // If the operand is a constant, sign extend to increase our chances
+ // of being able to use a .vi instruction. ANY_EXTEND would become a
+ // zero extend and the simm5 check in isel would fail.
+ // FIXME: Should we ignore the upper bits in isel instead?
+ unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND
+ : ISD::ANY_EXTEND;
+ ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp);
+ return DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, Op->getVTList(),
+ Operands);
}
}
}
entry:
; CHECK-LABEL: intrinsic_vadc_vim_nxv1i8_nxv1i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
<vscale x 1 x i8> %0,
- i8 9,
+ i8 -9,
<vscale x 1 x i1> %1,
i32 %2)
entry:
; CHECK-LABEL: intrinsic_vadc_vim_nxv4i8_nxv4i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
<vscale x 4 x i8> %0,
- i8 9,
+ i8 -9,
<vscale x 4 x i1> %1,
i32 %2)
entry:
; CHECK-LABEL: intrinsic_vadc_vim_nxv16i8_nxv16i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
<vscale x 16 x i8> %0,
- i8 9,
+ i8 -9,
<vscale x 16 x i1> %1,
i32 %2)
entry:
; CHECK-LABEL: intrinsic_vadc_vim_nxv64i8_nxv64i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
<vscale x 64 x i8> %0,
- i8 9,
+ i8 -9,
<vscale x 64 x i1> %1,
i32 %2)
entry:
; CHECK-LABEL: intrinsic_vadc_vim_nxv2i16_nxv2i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
<vscale x 2 x i16> %0,
- i16 9,
+ i16 -9,
<vscale x 2 x i1> %1,
i32 %2)
entry:
; CHECK-LABEL: intrinsic_vadc_vim_nxv8i16_nxv8i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
<vscale x 8 x i16> %0,
- i16 9,
+ i16 -9,
<vscale x 8 x i1> %1,
i32 %2)
entry:
; CHECK-LABEL: intrinsic_vadc_vim_nxv32i16_nxv32i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
<vscale x 32 x i16> %0,
- i16 9,
+ i16 -9,
<vscale x 32 x i1> %1,
i32 %2)
entry:
; CHECK-LABEL: intrinsic_vadc_vim_nxv2i32_nxv2i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
<vscale x 2 x i32> %0,
- i32 9,
+ i32 -9,
<vscale x 2 x i1> %1,
i32 %2)
entry:
; CHECK-LABEL: intrinsic_vadc_vim_nxv8i32_nxv8i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
<vscale x 8 x i32> %0,
- i32 9,
+ i32 -9,
<vscale x 8 x i1> %1,
i32 %2)
entry:
; CHECK-LABEL: intrinsic_vadc_vim_nxv2i8_nxv2i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
<vscale x 2 x i8> %0,
- i8 9,
+ i8 -9,
<vscale x 2 x i1> %1,
i64 %2)
entry:
; CHECK-LABEL: intrinsic_vadc_vim_nxv8i8_nxv8i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
<vscale x 8 x i8> %0,
- i8 9,
+ i8 -9,
<vscale x 8 x i1> %1,
i64 %2)
entry:
; CHECK-LABEL: intrinsic_vadc_vim_nxv32i8_nxv32i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
<vscale x 32 x i8> %0,
- i8 9,
+ i8 -9,
<vscale x 32 x i1> %1,
i64 %2)
entry:
; CHECK-LABEL: intrinsic_vadc_vim_nxv1i16_nxv1i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
<vscale x 1 x i16> %0,
- i16 9,
+ i16 -9,
<vscale x 1 x i1> %1,
i64 %2)
entry:
; CHECK-LABEL: intrinsic_vadc_vim_nxv4i16_nxv4i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
<vscale x 4 x i16> %0,
- i16 9,
+ i16 -9,
<vscale x 4 x i1> %1,
i64 %2)
entry:
; CHECK-LABEL: intrinsic_vadc_vim_nxv16i16_nxv16i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
<vscale x 16 x i16> %0,
- i16 9,
+ i16 -9,
<vscale x 16 x i1> %1,
i64 %2)
entry:
; CHECK-LABEL: intrinsic_vadc_vim_nxv1i32_nxv1i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
<vscale x 1 x i32> %0,
- i32 9,
+ i32 -9,
<vscale x 1 x i1> %1,
i64 %2)
entry:
; CHECK-LABEL: intrinsic_vadc_vim_nxv4i32_nxv4i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
<vscale x 4 x i32> %0,
- i32 9,
+ i32 -9,
<vscale x 4 x i1> %1,
i64 %2)
entry:
; CHECK-LABEL: intrinsic_vadc_vim_nxv16i32_nxv16i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
<vscale x 16 x i32> %0,
- i32 9,
+ i32 -9,
<vscale x 16 x i1> %1,
i64 %2)
entry:
; CHECK-LABEL: intrinsic_vadc_vim_nxv2i64_nxv2i64_i64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
<vscale x 2 x i64> %0,
- i64 9,
+ i64 -9,
<vscale x 2 x i1> %1,
i64 %2)
entry:
; CHECK-LABEL: intrinsic_vadc_vim_nxv8i64_nxv8i64_i64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
<vscale x 8 x i64> %0,
- i64 9,
+ i64 -9,
<vscale x 8 x i1> %1,
i64 %2)
entry:
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i8 9,
+ i8 -9,
<vscale x 1 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i8 9,
+ i8 -9,
<vscale x 2 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i8 9,
+ i8 -9,
<vscale x 4 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i8 9,
+ i8 -9,
<vscale x 8 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i8 9,
+ i8 -9,
<vscale x 16 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i8 9,
+ i8 -9,
<vscale x 32 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vadd_vi_nxv64i8_nxv64i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
<vscale x 64 x i8> %0,
- i8 9,
+ i8 -9,
i32 %1)
ret <vscale x 64 x i8> %a
entry:
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i8 9,
+ i8 -9,
<vscale x 64 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i16 9,
+ i16 -9,
<vscale x 1 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i16 9,
+ i16 -9,
<vscale x 2 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i16 9,
+ i16 -9,
<vscale x 4 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i16 9,
+ i16 -9,
<vscale x 8 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i16 9,
+ i16 -9,
<vscale x 16 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i16 9,
+ i16 -9,
<vscale x 32 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 9,
+ i32 -9,
<vscale x 1 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 9,
+ i32 -9,
<vscale x 2 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 9,
+ i32 -9,
<vscale x 4 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 9,
+ i32 -9,
<vscale x 8 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i32 9,
+ i32 -9,
<vscale x 16 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
<vscale x 2 x i8> %0,
- i8 9,
+ i8 -9,
i32 %1)
ret <vscale x 2 x i1> %a
entry:
; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
<vscale x 8 x i8> %0,
- i8 9,
+ i8 -9,
i32 %1)
ret <vscale x 8 x i1> %a
entry:
; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
<vscale x 32 x i8> %0,
- i8 9,
+ i8 -9,
i32 %1)
ret <vscale x 32 x i1> %a
entry:
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
<vscale x 1 x i16> %0,
- i16 9,
+ i16 -9,
i32 %1)
ret <vscale x 1 x i1> %a
entry:
; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
<vscale x 4 x i16> %0,
- i16 9,
+ i16 -9,
i32 %1)
ret <vscale x 4 x i1> %a
entry:
; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
<vscale x 16 x i16> %0,
- i16 9,
+ i16 -9,
i32 %1)
ret <vscale x 16 x i1> %a
entry:
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
<vscale x 1 x i32> %0,
- i32 9,
+ i32 -9,
i32 %1)
ret <vscale x 1 x i1> %a
entry:
; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
<vscale x 4 x i32> %0,
- i32 9,
+ i32 -9,
i32 %1)
ret <vscale x 4 x i1> %a
entry:
; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
<vscale x 16 x i32> %0,
- i32 9,
+ i32 -9,
i32 %1)
ret <vscale x 16 x i1> %a
entry:
; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
<vscale x 2 x i8> %0,
- i8 9,
+ i8 -9,
i64 %1)
ret <vscale x 2 x i1> %a
entry:
; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
<vscale x 8 x i8> %0,
- i8 9,
+ i8 -9,
i64 %1)
ret <vscale x 8 x i1> %a
entry:
; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
<vscale x 32 x i8> %0,
- i8 9,
+ i8 -9,
i64 %1)
ret <vscale x 32 x i1> %a
entry:
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
<vscale x 1 x i16> %0,
- i16 9,
+ i16 -9,
i64 %1)
ret <vscale x 1 x i1> %a
entry:
; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
<vscale x 4 x i16> %0,
- i16 9,
+ i16 -9,
i64 %1)
ret <vscale x 4 x i1> %a
entry:
; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
<vscale x 16 x i16> %0,
- i16 9,
+ i16 -9,
i64 %1)
ret <vscale x 16 x i1> %a
entry:
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
<vscale x 1 x i32> %0,
- i32 9,
+ i32 -9,
i64 %1)
ret <vscale x 1 x i1> %a
entry:
; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
<vscale x 4 x i32> %0,
- i32 9,
+ i32 -9,
i64 %1)
ret <vscale x 4 x i1> %a
entry:
; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
<vscale x 16 x i32> %0,
- i32 9,
+ i32 -9,
i64 %1)
ret <vscale x 16 x i1> %a
entry:
; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i64(
<vscale x 2 x i64> %0,
- i64 9,
+ i64 -9,
i64 %1)
ret <vscale x 2 x i1> %a
entry:
; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i64(
<vscale x 8 x i64> %0,
- i64 9,
+ i64 -9,
i64 %1)
ret <vscale x 8 x i1> %a
entry:
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
<vscale x 2 x i8> %0,
- i8 9,
+ i8 -9,
<vscale x 2 x i1> %1,
i32 %2)
entry:
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
<vscale x 8 x i8> %0,
- i8 9,
+ i8 -9,
<vscale x 8 x i1> %1,
i32 %2)
entry:
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
<vscale x 32 x i8> %0,
- i8 9,
+ i8 -9,
<vscale x 32 x i1> %1,
i32 %2)
entry:
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
<vscale x 1 x i16> %0,
- i16 9,
+ i16 -9,
<vscale x 1 x i1> %1,
i32 %2)
entry:
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
<vscale x 4 x i16> %0,
- i16 9,
+ i16 -9,
<vscale x 4 x i1> %1,
i32 %2)
entry:
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
<vscale x 16 x i16> %0,
- i16 9,
+ i16 -9,
<vscale x 16 x i1> %1,
i32 %2)
entry:
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
<vscale x 1 x i32> %0,
- i32 9,
+ i32 -9,
<vscale x 1 x i1> %1,
i32 %2)
entry:
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
<vscale x 4 x i32> %0,
- i32 9,
+ i32 -9,
<vscale x 4 x i1> %1,
i32 %2)
entry:
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
%a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
<vscale x 16 x i32> %0,
- i32 9,
+ i32 -9,
<vscale x 16 x i1> %1,
i32 %2)
entry:
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i8 9,
+ i8 -9,
<vscale x 1 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i8 9,
+ i8 -9,
<vscale x 2 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i8 9,
+ i8 -9,
<vscale x 4 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i8 9,
+ i8 -9,
<vscale x 8 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i8 9,
+ i8 -9,
<vscale x 16 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i8 9,
+ i8 -9,
<vscale x 32 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i8 9,
+ i8 -9,
<vscale x 64 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i16 9,
+ i16 -9,
<vscale x 1 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i16 9,
+ i16 -9,
<vscale x 2 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i16 9,
+ i16 -9,
<vscale x 4 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i16 9,
+ i16 -9,
<vscale x 8 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i16 9,
+ i16 -9,
<vscale x 16 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i16 9,
+ i16 -9,
<vscale x 32 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 9,
+ i32 -9,
<vscale x 1 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 9,
+ i32 -9,
<vscale x 2 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 9,
+ i32 -9,
<vscale x 4 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 9,
+ i32 -9,
<vscale x 8 x i1> %2,
i32 %3)
entry:
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i32 9,
+ i32 -9,
<vscale x 16 x i1> %2,
i32 %3)