We already treat -1 passed to instruction intrinsics as vlmax; this
change makes vsetvli consistent.
Reviewed By: rogfer01
Differential Revision: https://reviews.llvm.org/D152954
SDValue VLOperand;
unsigned Opcode = RISCV::PseudoVSETVLI;
- if (VLMax) {
+ if (VLMax || isAllOnesConstant(Node->getOperand(1))) {
VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
Opcode = RISCV::PseudoVSETVLIX0;
} else {
%x = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.iXLen(<vscale x 4 x i32> undef, <vscale x 4 x i32>* %ptr, iXLen %vl1)
ret <vscale x 4 x i32> %x
}
+
+; An AVL of -1 is treated as VLMAX: the selector picks PseudoVSETVLIX0 with
+; the zero register as the AVL operand, hence "vsetvli a0, zero" below
+; rather than a vsetvli reading a GPR AVL.
+define iXLen @test_vsetvli_negone_e8m1(iXLen %avl) nounwind {
+; CHECK-LABEL: test_vsetvli_negone_e8m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: ret
+ %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen -1, iXLen 0, iXLen 0)
+ ret iXLen %vl
+}