From 8c4937b33fe9090546f6dc834e174177075b5084 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Mon, 21 Mar 2022 14:27:09 -0700 Subject: [PATCH] [RISCV] Special case sign extended scalars when type legalizing nxvXi64 .vx intrinsics on RV32. On RV32, we need to type legalize i64 scalar arguments to intrinsics. We usually do this by splatting the value into a vector separately. If the scalar happens to be sign extended, we can continue using a .vx intrinsic. We already special cased sign extended constants; this extends it to any sign extended value. I've only added tests for one case of vadd. Most intrinsics go through the same check. I can add more tests if we're concerned. Differential Revision: https://reviews.llvm.org/D122186 --- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 8 ++---- llvm/test/CodeGen/RISCV/rvv/vadd.ll | 43 +++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 5 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 9edddd6..64ba46f 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -4624,11 +4624,9 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG, // If this is a sign-extended 32-bit constant, we can truncate it and rely // on the instruction to sign-extend since SEW>XLEN. 
- if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) { - if (isInt<32>(CVal->getSExtValue())) { - ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32); - return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands); - } + if (DAG.ComputeNumSignBits(ScalarOp) > 32) { + ScalarOp = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, ScalarOp); + return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands); } switch (IntNo) { diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd.ll b/llvm/test/CodeGen/RISCV/rvv/vadd.ll index ac5dde5..c6328f4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd.ll @@ -1881,6 +1881,49 @@ entry: ret <vscale x 1 x i64> %a } +define <vscale x 1 x i64> @intrinsic_vadd_vx_sext_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vadd_vx_sext_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV32-NEXT: vadd.vx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vadd_vx_sext_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vadd.vx v8, v8, a0 +; RV64-NEXT: ret entry: + %ext = sext i32 %1 to i64 + %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64( + <vscale x 1 x i64> undef, + <vscale x 1 x i64> %0, + i64 %ext, + iXLen %2) + + ret <vscale x 1 x i64> %a +} + +define <vscale x 1 x i64> @intrinsic_vadd_vx_sextload_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32* %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_sextload_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lw a0, 0(a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret entry: + %load = load i32, i32* %1 + %ext = sext i32 %load to i64 + %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64( + <vscale x 1 x i64> undef, + <vscale x 1 x i64> %0, + i64 %ext, + iXLen %2) + + ret <vscale x 1 x i64> %a +} + declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64( <vscale x 1 x i64>, <vscale x 1 x i64>, -- 2.7.4