From: Craig Topper
Date: Thu, 28 Apr 2022 16:21:13 +0000 (-0700)
Subject: [RISCV] Use default promotion for (i32 (shl 1, X)) on RV64 when Zbs is enabled.
X-Git-Tag: upstream/15.0.7~9127
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=ec11fbb1d682e9c3e67eafc036c92fe9200b40f5;p=platform%2Fupstream%2Fllvm.git

[RISCV] Use default promotion for (i32 (shl 1, X)) on RV64 when Zbs is enabled.

This improves opportunities to use bset/bclr/binv. Unfortunately, there
are no W versions of these instructions, so this isn't always a clear
win. If we use SLLW, we get the sign extension and shift-amount masking
for free, but we need to put a 1 in a register and can't remove an
or/xor. If we use bset/bclr/binv, we remove the immediate
materialization and the logic op, but might need a mask on the shift
amount and a sext.w.

Reviewed By: luismarques

Differential Revision: https://reviews.llvm.org/D124096
---

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 2b3fa16..5f555bb 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -6822,6 +6822,10 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
            "Unexpected custom legalisation");
     if (N->getOperand(1).getOpcode() != ISD::Constant) {
+      // If we can use a BSET instruction, allow default promotion to apply.
+      if (N->getOpcode() == ISD::SHL && Subtarget.hasStdExtZbs() &&
+          isOneConstant(N->getOperand(0)))
+        break;
       Results.push_back(customLegalizeToWOp(N, DAG));
       break;
     }
diff --git a/llvm/test/CodeGen/RISCV/rv64zbs.ll b/llvm/test/CodeGen/RISCV/rv64zbs.ll
index edd9e96..1bc5dba 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbs.ll
@@ -15,10 +15,9 @@ define signext i32 @bclr_i32(i32 signext %a, i32 signext %b) nounwind {
 ;
 ; RV64ZBS-LABEL: bclr_i32:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    not a1, a1
-; RV64ZBS-NEXT:    and a0, a1, a0
+; RV64ZBS-NEXT:    andi a1, a1, 31
+; RV64ZBS-NEXT:    bclr a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %and = and i32 %b, 31
   %shl = shl nuw i32 1, %and
@@ -38,10 +37,8 @@ define signext i32 @bclr_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
 ;
 ; RV64ZBS-LABEL: bclr_i32_no_mask:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    not a1, a1
-; RV64ZBS-NEXT:    and a0, a1, a0
+; RV64ZBS-NEXT:    bclr a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %shl = shl i32 1, %b
   %neg = xor i32 %shl, -1
@@ -62,10 +59,8 @@ define signext i32 @bclr_i32_load(i32* %p, i32 signext %b) nounwind {
 ; RV64ZBS-LABEL: bclr_i32_load:
 ; RV64ZBS:       # %bb.0:
 ; RV64ZBS-NEXT:    lw a0, 0(a0)
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    not a1, a1
-; RV64ZBS-NEXT:    and a0, a1, a0
+; RV64ZBS-NEXT:    bclr a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %a = load i32, i32* %p
   %shl = shl i32 1, %b
@@ -123,9 +118,9 @@ define signext i32 @bset_i32(i32 signext %a, i32 signext %b) nounwind {
 ;
 ; RV64ZBS-LABEL: bset_i32:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    or a0, a1, a0
+; RV64ZBS-NEXT:    andi a1, a1, 31
+; RV64ZBS-NEXT:    bset a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %and = and i32 %b, 31
   %shl = shl nuw i32 1, %and
@@ -143,9 +138,8 @@ define signext i32 @bset_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
 ;
 ; RV64ZBS-LABEL: bset_i32_no_mask:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    or a0, a1, a0
+; RV64ZBS-NEXT:    bset a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %shl = shl i32 1, %b
   %or = or i32 %shl, %a
@@ -164,9 +158,8 @@ define signext i32 @bset_i32_load(i32* %p, i32 signext %b) nounwind {
 ; RV64ZBS-LABEL: bset_i32_load:
 ; RV64ZBS:       # %bb.0:
 ; RV64ZBS-NEXT:    lw a0, 0(a0)
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    or a0, a1, a0
+; RV64ZBS-NEXT:    bset a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %a = load i32, i32* %p
   %shl = shl i32 1, %b
@@ -184,8 +177,8 @@ define signext i32 @bset_i32_zero(i32 signext %a) nounwind {
 ;
 ; RV64ZBS-LABEL: bset_i32_zero:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a1, 1
-; RV64ZBS-NEXT:    sllw a0, a1, a0
+; RV64ZBS-NEXT:    bset a0, zero, a0
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %shl = shl i32 1, %a
   ret i32 %shl
@@ -252,9 +245,9 @@ define signext i32 @binv_i32(i32 signext %a, i32 signext %b) nounwind {
 ;
 ; RV64ZBS-LABEL: binv_i32:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    xor a0, a1, a0
+; RV64ZBS-NEXT:    andi a1, a1, 31
+; RV64ZBS-NEXT:    binv a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %and = and i32 %b, 31
   %shl = shl nuw i32 1, %and
@@ -272,9 +265,8 @@ define signext i32 @binv_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
 ;
 ; RV64ZBS-LABEL: binv_i32_no_mask:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    xor a0, a1, a0
+; RV64ZBS-NEXT:    binv a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %shl = shl i32 1, %b
   %xor = xor i32 %shl, %a
@@ -293,9 +285,8 @@ define signext i32 @binv_i32_load(i32* %p, i32 signext %b) nounwind {
 ; RV64ZBS-LABEL: binv_i32_load:
 ; RV64ZBS:       # %bb.0:
 ; RV64ZBS-NEXT:    lw a0, 0(a0)
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    xor a0, a1, a0
+; RV64ZBS-NEXT:    binv a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %a = load i32, i32* %p
   %shl = shl i32 1, %b
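
As a concrete illustration of the tradeoff described in the commit message, here is a
sketch of the two possible lowerings of an unmasked `shl i32 1, %b` feeding an `or`,
using the register assignments from the bset_i32_no_mask test above:

    # Default RV64 promotion via SLLW (value in a0, shift amount in a1):
    li     a2, 1           # materialize the constant 1
    sllw   a1, a2, a1      # SLLW masks the shift amount to 5 bits and
                           # sign-extends the result for free
    or     a0, a1, a0      # the logic op remains

    # Zbs lowering after this patch:
    bset   a0, a0, a1      # folds the 1, the shift, and the or into one op
    sext.w a0, a0          # but an explicit sign extend may be needed, plus
                           # an `andi a1, a1, 31` when the shift amount is
                           # not already masked (see bset_i32 above)

Both sequences compute the same sign-extended i32 result; which one is shorter depends
on whether the surrounding code already masks the shift amount and whether the
sign-extended value is actually needed.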