From a82a28ae12ae06e6ef1b8364f465e9ea5218fe8a Mon Sep 17 00:00:00 2001
From: Jessica Paquette
Date: Tue, 4 Feb 2020 15:10:53 -0800
Subject: [PATCH] [AArch64][GlobalISel] Fix one use check in getTestBitReg

(1) The check needs to be on the 0th operand of whatever we're folding
(2) Checks for validity should happen before we change the bit

Fixes a bug which caused MultiSource/Applications/JM/lencod to fail at -O3.

Differential Revision: https://reviews.llvm.org/D74002
---
 .../Target/AArch64/AArch64InstructionSelector.cpp  | 13 ++++---
 .../AArch64/GlobalISel/opt-fold-ext-tbz-tbnz.mir   |  5 ++-
 .../AArch64/GlobalISel/opt-fold-shift-tbz-tbnz.mir | 42 ++++++++++++++++++++++
 3 files changed, 55 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
index f6f7108..4fbb3a4 100644
--- a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -996,6 +996,11 @@ static Register getTestBitReg(Register Reg, uint64_t &Bit, bool &Invert,
   assert(Reg.isValid() && "Expected valid register!");
   while (MachineInstr *MI = getDefIgnoringCopies(Reg, MRI)) {
     unsigned Opc = MI->getOpcode();
+
+    if (!MI->getOperand(0).isReg() ||
+        !MRI.hasOneUse(MI->getOperand(0).getReg()))
+      break;
+
     // (tbz (any_ext x), b) -> (tbz x, b) if we don't use the extended bits.
     //
     // (tbz (trunc x), b) -> (tbz x, b) is always safe, because the bit number
@@ -1044,8 +1049,8 @@ static Register getTestBitReg(Register Reg, uint64_t &Bit, bool &Invert,
     }
     }
 
-    // Didn't find a constant. Bail out of the loop.
-    if (!C)
+    // Didn't find a constant or viable register. Bail out of the loop.
+    if (!C || !TestReg.isValid())
       break;
 
     // We found a suitable instruction with a constant. Check to see if we can
@@ -1083,8 +1088,8 @@ static Register getTestBitReg(Register Reg, uint64_t &Bit, bool &Invert,
     }
 
     // Check if we found anything worth folding.
-    if (!NextReg.isValid() || !MRI.hasOneUse(NextReg))
-      break;
+    if (!NextReg.isValid())
+      return Reg;
     Reg = NextReg;
   }
 
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-ext-tbz-tbnz.mir b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-ext-tbz-tbnz.mir
index 453cef5..2d4fd65 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-ext-tbz-tbnz.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-ext-tbz-tbnz.mir
@@ -114,7 +114,10 @@ body: |
   ; CHECK: %copy:gpr32 = COPY $w0
   ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, %copy, %subreg.sub_32
   ; CHECK: %zext:gpr64 = UBFMXri [[SUBREG_TO_REG]], 0, 31
-  ; CHECK: TBNZW %copy, 3, %bb.1
+  ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY %zext
+  ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[COPY]].sub_32
+  ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY [[COPY1]]
+  ; CHECK: TBNZW [[COPY2]], 3, %bb.1
   ; CHECK: B %bb.0
   ; CHECK: bb.1:
   ; CHECK: $x0 = COPY %zext
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-shift-tbz-tbnz.mir b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-shift-tbz-tbnz.mir
index 388944c..afd4310 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-shift-tbz-tbnz.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-shift-tbz-tbnz.mir
@@ -112,3 +112,45 @@ body: |
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
+
+...
+---
+name: dont_fold_shl_3
+alignment: 4
+legalized: true
+regBankSelected: true
+body: |
+  ; CHECK-LABEL: name: dont_fold_shl_3
+  ; CHECK: bb.0:
+  ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
+  ; CHECK: %copy:gpr64 = COPY $x0
+  ; CHECK: %shl:gpr64 = UBFMXri %copy, 62, 61
+  ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY %shl
+  ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[COPY]].sub_32
+  ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY [[COPY1]]
+  ; CHECK: TBNZW [[COPY2]], 3, %bb.1
+  ; CHECK: B %bb.0
+  ; CHECK: bb.1:
+  ; CHECK: %second_use:gpr64sp = ORRXri %shl, 8000
+  ; CHECK: $x0 = COPY %second_use
+  ; CHECK: RET_ReallyLR implicit $x0
+  bb.0:
+    successors: %bb.0, %bb.1
+    liveins: $x0
+    %copy:gpr(s64) = COPY $x0
+    %bit:gpr(s64) = G_CONSTANT i64 8
+    %zero:gpr(s64) = G_CONSTANT i64 0
+    %fold_cst:gpr(s64) = G_CONSTANT i64 2
+
+    ; Don't walk past the G_SHL when it's used more than once.
+    %shl:gpr(s64) = G_SHL %copy, %fold_cst
+    %and:gpr(s64) = G_AND %shl, %bit
+    %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
+    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
+    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BR %bb.0
+
+  bb.1:
+    %second_use:gpr(s64) = G_OR %shl, %bit
+    $x0 = COPY %second_use
+    RET_ReallyLR implicit $x0
-- 
2.7.4
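
As a rough illustration of the corrected control flow, here is a standalone toy model of the walk getTestBitReg performs with the fix applied. This is not LLVM code: the Inst struct, register numbers, and use counts are invented for the example. Only the shape of the check mirrors the patch, namely that the one-use test is made on the def's result (operand 0) before the tested bit is adjusted.

// Toy model (C++17, self-contained): walk up a chain of defs feeding a
// bit-test branch, folding through single-use defs only. Mirrors the
// dont_fold_shl_3 case: %2 = shl %1, 2 and %3 = and %2, 8, where %2 has a
// second user besides the and.
#include <cstdint>
#include <iostream>
#include <map>

struct Inst {
  enum Kind { Copy, Shl, And } kind;
  int dst;       // register defined by this instruction (operand 0)
  int src;       // register operand we may walk through
  uint64_t imm;  // shift amount for Shl, mask for And
};

int main() {
  std::map<int, Inst> defs = {{2, {Inst::Shl, 2, 1, 2}},
                              {3, {Inst::And, 3, 2, 8}}};
  std::map<int, int> numUses = {{1, 1}, {2, 2}, {3, 1}};  // %2 used twice

  int reg = 3;       // register the branch tests, i.e. tbnz %3, #3
  uint64_t bit = 3;

  while (defs.count(reg)) {
    const Inst &mi = defs.at(reg);
    // The fix: bail out *before* touching `bit` unless the result of the
    // def (operand 0) has exactly one use.
    if (numUses[mi.dst] != 1)
      break;
    if (mi.kind == Inst::Shl) {
      if (bit < mi.imm)
        break;          // the tested bit would be a shifted-in zero
      bit -= mi.imm;    // only adjusted after the checks above have passed
      reg = mi.src;
    } else if (mi.kind == Inst::And) {
      if (((mi.imm >> bit) & 1) == 0)
        break;          // the masked bit is always zero; nothing to fold
      reg = mi.src;
    } else {
      reg = mi.src;     // plain copy: look through it
    }
  }
  // Prints "test bit 3 of %2": the walk looks through the single-use and,
  // but stops at the shl because its result has two uses, just as the new
  // MIR test expects a TBNZW on a copy of %shl rather than a fold into %copy.
  std::cout << "test bit " << bit << " of %" << reg << "\n";
  return 0;
}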