From: Jessica Paquette
Date: Wed, 5 Feb 2020 21:54:00 +0000 (-0800)
Subject: [AArch64][GlobalISel] Emit TBNZ with G_BRCOND where the condition is SLT
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=a3738414072900ace9cbbe209d0195a3443d1d54;p=platform%2Fupstream%2Fllvm.git

[AArch64][GlobalISel] Emit TBNZ with G_BRCOND where the condition is SLT

When we have a G_ICMP which checks SLT, and the comparison is against 0,
we can emit a TBNZ instead of a CBZ. This lets us fold things into the
branch, which can provide some code size savings.

This is similar to the case in `AArch64TargetLowering::LowerBR_CC`.

https://reviews.llvm.org/D74090
---

diff --git a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
index 3d6d11f35..c4ac0bc 100644
--- a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -1248,6 +1248,18 @@ bool AArch64InstructionSelector::selectCompareBranch(
     return true;
   }
 
+  // When we have a less than comparison, we can just test if the last bit
+  // is not zero.
+  //
+  // Note that we don't want to do this when we have a G_AND because it can
+  // become a tst. The tst will make the test bit in the TB(N)Z redundant.
+  if (Pred == CmpInst::ICMP_SLT && LHSMI->getOpcode() != TargetOpcode::G_AND) {
+    uint64_t Bit = MRI.getType(LHS).getSizeInBits() - 1;
+    emitTestBit(LHS, Bit, /*IsNegative = */ true, DestMBB, MIB);
+    I.eraseFromParent();
+    return true;
+  }
+
   const RegisterBank &RB = *RBI.getRegBank(LHS, MRI, TRI);
   if (RB.getID() != AArch64::GPRRegBankID)
     return false;
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/tbnz-slt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/tbnz-slt.mir
new file mode 100644
index 0000000..9bcae08
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/tbnz-slt.mir
@@ -0,0 +1,121 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple aarch64-unknown-unknown -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+#
+# Test that we can produce a TBNZ when we have a slt compare against 0.
+#
+# The bit tested should be the size of the test register minus 1.
+#
+
+...
+---
+name: tbnzx_slt
+alignment: 4
+legalized: true
+regBankSelected: true
+body: |
+  ; CHECK-LABEL: name: tbnzx_slt
+  ; CHECK: bb.0:
+  ; CHECK:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
+  ; CHECK:   %copy:gpr64 = COPY $x0
+  ; CHECK:   TBNZX %copy, 63, %bb.1
+  ; CHECK:   B %bb.0
+  ; CHECK: bb.1:
+  ; CHECK:   RET_ReallyLR
+  bb.0:
+    successors: %bb.0, %bb.1
+    liveins: $x0
+    %copy:gpr(s64) = COPY $x0
+    %zero:gpr(s64) = G_CONSTANT i64 0
+    %cmp:gpr(s32) = G_ICMP intpred(slt), %copy(s64), %zero
+    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
+    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BR %bb.0
+  bb.1:
+    RET_ReallyLR
+
+...
+---
+name: tbnzw_slt
+alignment: 4
+legalized: true
+regBankSelected: true
+body: |
+  ; CHECK-LABEL: name: tbnzw_slt
+  ; CHECK: bb.0:
+  ; CHECK:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
+  ; CHECK:   %copy:gpr32 = COPY $w0
+  ; CHECK:   TBNZW %copy, 31, %bb.1
+  ; CHECK:   B %bb.0
+  ; CHECK: bb.1:
+  ; CHECK:   RET_ReallyLR
+  bb.0:
+    successors: %bb.0, %bb.1
+    liveins: $x0
+    %copy:gpr(s32) = COPY $w0
+    %zero:gpr(s32) = G_CONSTANT i32 0
+    %cmp:gpr(s32) = G_ICMP intpred(slt), %copy(s32), %zero
+    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
+    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BR %bb.0
+  bb.1:
+    RET_ReallyLR
+
+...
+---
+name: no_tbnz_not_zero
+alignment: 4
+legalized: true
+regBankSelected: true
+body: |
+  ; CHECK-LABEL: name: no_tbnz_not_zero
+  ; CHECK: bb.0:
+  ; CHECK:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
+  ; CHECK:   %copy:gpr32sp = COPY $w0
+  ; CHECK:   $wzr = SUBSWri %copy, 1, 0, implicit-def $nzcv
+  ; CHECK:   Bcc 11, %bb.1, implicit $nzcv
+  ; CHECK:   B %bb.0
+  ; CHECK: bb.1:
+  ; CHECK:   RET_ReallyLR
+  bb.0:
+    successors: %bb.0, %bb.1
+    liveins: $x0
+    %copy:gpr(s32) = COPY $w0
+    %one:gpr(s32) = G_CONSTANT i32 1
+    %cmp:gpr(s32) = G_ICMP intpred(slt), %copy(s32), %one
+    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
+    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BR %bb.0
+  bb.1:
+    RET_ReallyLR
+
+...
+---
+name: dont_fold_and
+alignment: 4
+legalized: true
+regBankSelected: true
+body: |
+  ; CHECK-LABEL: name: dont_fold_and
+  ; CHECK: bb.0:
+  ; CHECK:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
+  ; CHECK:   %copy:gpr64 = COPY $x0
+  ; CHECK:   $xzr = ANDSXri %copy, 8000, implicit-def $nzcv
+  ; CHECK:   %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
+  ; CHECK:   TBNZW %cmp, 0, %bb.1
+  ; CHECK:   B %bb.0
+  ; CHECK: bb.1:
+  ; CHECK:   RET_ReallyLR
+  bb.0:
+    successors: %bb.0, %bb.1
+    liveins: $x0
+    %copy:gpr(s64) = COPY $x0
+    %bit:gpr(s64) = G_CONSTANT i64 8
+    %zero:gpr(s64) = G_CONSTANT i64 0
+    %c:gpr(s64) = G_CONSTANT i64 8
+    %and:gpr(s64) = G_AND %copy, %bit
+    %cmp:gpr(s32) = G_ICMP intpred(slt), %and(s64), %zero
+    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
+    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BR %bb.0
+  bb.1:
+    RET_ReallyLR
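
For reference, the transform above relies on a two's complement fact: a signed
integer is less than zero exactly when its most significant bit is set, so a
G_ICMP slt against 0 followed by a conditional branch collapses into a single
test-bit-and-branch on bit (width - 1). The following standalone C sketch (not
part of the patch; the helper name sign_bit_set is made up for illustration)
checks this equivalence for 64-bit values, mirroring what TBNZX %copy, 63
tests:

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative helper: extract bit 63, the bit TBNZX tests for an
     * s64 value (getSizeInBits() - 1 in the selector code above). */
    static int sign_bit_set(int64_t x) {
      return (int)(((uint64_t)x >> 63) & 1);
    }

    int main(void) {
      const int64_t cases[] = {0, 1, -1, 42, -42, INT64_MIN, INT64_MAX};
      for (unsigned i = 0; i < sizeof cases / sizeof cases[0]; ++i)
        /* x < 0 (the G_ICMP slt against 0) agrees with the single-bit test. */
        assert((cases[i] < 0) == sign_bit_set(cases[i]));
      return 0;
    }

This also shows why the no_tbnz_not_zero test above must keep the SUBSWri +
Bcc sequence: x < 1 is not determined by any single bit of x, so only
comparisons against 0 qualify for the TB(N)Z form.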