-//===- DivRemPairs.cpp - Hoist/decompose division and remainder -*- C++ -*-===//
+//===- DivRemPairs.cpp - Hoist/[dr]ecompose division and remainder --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
//
-// This pass hoists and/or decomposes integer division and remainder
+// This pass hoists and/or decomposes/recomposes integer division and remainder
// instructions to enable CFG improvements and better codegen.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
using namespace llvm;
+using namespace llvm::PatternMatch;
#define DEBUG_TYPE "div-rem-pairs"
STATISTIC(NumPairs, "Number of div/rem pairs");
+STATISTIC(NumRecomposed, "Number of instructions recomposed");
STATISTIC(NumHoisted, "Number of instructions hoisted");
STATISTIC(NumDecomposed, "Number of instructions decomposed");
DEBUG_COUNTER(DRPCounter, "div-rem-pairs-transform",
"Controls transformations in div-rem-pairs pass");
+namespace {
+/// The result of matching a remainder computed in expanded form:
+/// Key identifies the (signedness, dividend, divisor) triple of the implied
+/// div/rem pair, and Value is the 'sub' instruction that produces the
+/// remainder, i.e. the root of  X - ((X ?/ Y) * Y).
+struct ExpandedMatch {
+ DivRemMapKey Key;
+ Instruction *Value;
+};
+} // namespace
+
+/// See if we can match: (which is the form we expand into)
+/// X - ((X ?/ Y) * Y)
+/// which is equivalent to:
+/// X ?% Y
+/// Returns the map key for the implied div/rem pair together with the matched
+/// 'sub' instruction, or llvm::None if \p I is not an expanded-form remainder.
+static llvm::Optional<ExpandedMatch> matchExpandedRem(Instruction &I) {
+ Value *Dividend, *XroundedDownToMultipleOfY;
+ if (!match(&I, m_Sub(m_Value(Dividend), m_Value(XroundedDownToMultipleOfY))))
+ return llvm::None;
+
+ Value *Divisor;
+ Instruction *Div;
+ // Look for ((X / Y) * Y). The multiplication is matched commutatively, the
+ // division may be either sdiv or udiv, and we capture it in Div so the
+ // signedness can be read off its opcode below.
+ if (!match(
+ XroundedDownToMultipleOfY,
+ m_c_Mul(m_CombineAnd(m_IDiv(m_Specific(Dividend), m_Value(Divisor)),
+ m_Instruction(Div)),
+ m_Deferred(Divisor))))
+ return llvm::None;
+
+ // Matched: package up the key (signedness + operands) and the 'sub'.
+ ExpandedMatch M;
+ M.Key.SignedOp = Div->getOpcode() == Instruction::SDiv;
+ M.Key.Dividend = Dividend;
+ M.Key.Divisor = Divisor;
+ M.Value = &I;
+ return M;
+}
+
/// A thin wrapper to store two values that we matched as div-rem pair.
/// We want this extra indirection to avoid dealing with RAUW'ing the map keys.
struct DivRemPairWorklistEntry {
/// In this pair, what are the divident and divisor?
Value *getDividend() const { return DivInst->getOperand(0); }
Value *getDivisor() const { return DivInst->getOperand(1); }
+
+ /// Is the remainder of this pair currently represented as a single
+ /// [us]rem instruction (returns false), or in the expanded
+ /// X - ((X ?/ Y) * Y) form (returns true)?
+ bool isRemExpanded() const {
+ switch (RemInst->getOpcode()) {
+ case Instruction::SRem:
+ case Instruction::URem:
+ return false; // single 'rem' instruction - unexpanded form.
+ default:
+ return true; // anything else means we have remainder in expanded form.
+ }
+ }
};
using DivRemWorklistTy = SmallVector<DivRemPairWorklistEntry, 4>;
RemMap[DivRemMapKey(true, I.getOperand(0), I.getOperand(1))] = &I;
else if (I.getOpcode() == Instruction::URem)
RemMap[DivRemMapKey(false, I.getOperand(0), I.getOperand(1))] = &I;
+ // Also record remainders that are already in the expanded
+ // X - ((X ?/ Y) * Y) form, under the same key shape, so they can pair up
+ // with a matching division just like a plain [us]rem would.
+ else if (auto Match = matchExpandedRem(I))
+ RemMap[Match->Key] = Match->Value;
}
}
// Process each entry in the worklist.
for (DivRemPairWorklistEntry &E : Worklist) {
+ if (!DebugCounter::shouldExecute(DRPCounter))
+ continue;
+
bool HasDivRemOp = TTI.hasDivRemOp(E.getType(), E.isSigned());
auto &DivInst = E.DivInst;
auto &RemInst = E.RemInst;
+ // Remember the original shape of the rem; the recomposition step below may
+ // replace an expanded rem with a real [us]rem, and the asserts further
+ // down reason about the *original* form.
+ const bool RemOriginallyWasInExpandedForm = E.isRemExpanded();
+
+ if (HasDivRemOp && E.isRemExpanded()) {
+ // The target supports div+rem but the rem is expanded.
+ // We should recompose it first.
+ Value *X = E.getDividend();
+ Value *Y = E.getDivisor();
+ Instruction *RealRem = E.isSigned() ? BinaryOperator::CreateSRem(X, Y)
+ : BinaryOperator::CreateURem(X, Y);
+ // Note that we place it right next to the original expanded instruction,
+ // and let further handling move it if needed.
+ RealRem->setName(RemInst->getName() + ".recomposed");
+ RealRem->insertAfter(RemInst);
+ Instruction *OrigRemInst = RemInst;
+ // Update AssertingVH<> with new instruction so it doesn't assert.
+ RemInst = RealRem;
+ // And replace the original instruction with the new one.
+ OrigRemInst->replaceAllUsesWith(RealRem);
+ OrigRemInst->eraseFromParent();
+ NumRecomposed++;
+ // Note that we have left ((X / Y) * Y) around.
+ // If it had other uses we could rewrite it as X - X % Y.
+ }
+
+ assert((!E.isRemExpanded() || !HasDivRemOp) &&
+ "*If* the target supports div-rem, then by now the RemInst *is* "
+ "Instruction::[US]Rem.");
+
// If the target supports div+rem and the instructions are in the same block
// already, there's nothing to do. The backend should handle this. If the
// target does not support div+rem, then we will decompose the rem.
continue;
bool DivDominates = DT.dominates(DivInst, RemInst);
- if (!DivDominates && !DT.dominates(RemInst, DivInst))
+ if (!DivDominates && !DT.dominates(RemInst, DivInst)) {
+ // We have matching div-rem pair, but they are in two different blocks,
+ // neither of which dominates one another.
+ assert(!RemOriginallyWasInExpandedForm &&
+ "Won't happen for expanded-form rem.");
+ // FIXME: We could hoist both ops to the common predecessor block?
continue;
+ }
- if (!DebugCounter::shouldExecute(DRPCounter))
+ // The target does not have a single div/rem operation,
+ // and the rem is already in expanded form. Nothing to do.
+ if (!HasDivRemOp && E.isRemExpanded())
continue;
if (HasDivRemOp) {
DivInst->moveAfter(RemInst);
NumHoisted++;
} else {
- // The target does not have a single div/rem operation. Decompose the
- // remainder calculation as:
+ // The target does not have a single div/rem operation,
+ // and the rem is *not* in an already-expanded form.
+ // Decompose the remainder calculation as:
// X % Y --> X - ((X / Y) * Y).
+
+ assert(!RemOriginallyWasInExpandedForm &&
+ "We should not be expanding if the rem was in expanded form to "
+ "begin with.");
+
Value *X = E.getDividend();
Value *Y = E.getDivisor();
Instruction *Mul = BinaryOperator::CreateMul(DivInst, Y);
; CHECK-LABEL: @decompose_illegal_srem_same_block(
; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[T0:%.*]] = mul i32 [[DIV]], [[B]]
-; CHECK-NEXT: [[REM:%.*]] = sub i32 [[A]], [[T0]]
-; CHECK-NEXT: call void @foo(i32 [[REM]], i32 [[DIV]])
+; CHECK-NEXT: [[REM_RECOMPOSED:%.*]] = srem i32 [[A]], [[B]]
+; CHECK-NEXT: call void @foo(i32 [[REM_RECOMPOSED]], i32 [[DIV]])
; CHECK-NEXT: ret void
;
%div = sdiv i32 %a, %b
; CHECK-LABEL: @decompose_illegal_urem_same_block(
; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[T0:%.*]] = mul i32 [[DIV]], [[B]]
-; CHECK-NEXT: [[REM:%.*]] = sub i32 [[A]], [[T0]]
-; CHECK-NEXT: call void @foo(i32 [[REM]], i32 [[DIV]])
+; CHECK-NEXT: [[REM_RECOMPOSED:%.*]] = urem i32 [[A]], [[B]]
+; CHECK-NEXT: call void @foo(i32 [[REM_RECOMPOSED]], i32 [[DIV]])
; CHECK-NEXT: ret void
;
%div = udiv i32 %a, %b
; CHECK-LABEL: @hoist_srem(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DIV:%.*]] = sdiv i16 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[REM_RECOMPOSED:%.*]] = srem i16 [[A]], [[B]]
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i16 [[DIV]], 42
; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
; CHECK: if:
; CHECK-NEXT: [[T0:%.*]] = mul i16 [[DIV]], [[B]]
-; CHECK-NEXT: [[REM:%.*]] = sub i16 [[A]], [[T0]]
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[RET:%.*]] = phi i16 [ [[REM]], [[IF]] ], [ 3, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[RET:%.*]] = phi i16 [ [[REM_RECOMPOSED]], [[IF]] ], [ 3, [[ENTRY:%.*]] ]
; CHECK-NEXT: ret i16 [[RET]]
;
entry:
; CHECK-LABEL: @hoist_urem(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DIV:%.*]] = udiv i8 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[REM_RECOMPOSED:%.*]] = urem i8 [[A]], [[B]]
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[DIV]], 42
; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
; CHECK: if:
; CHECK-NEXT: [[T0:%.*]] = mul i8 [[DIV]], [[B]]
-; CHECK-NEXT: [[REM:%.*]] = sub i8 [[A]], [[T0]]
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[RET:%.*]] = phi i8 [ [[REM]], [[IF]] ], [ 3, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[RET:%.*]] = phi i8 [ [[REM_RECOMPOSED]], [[IF]] ], [ 3, [[ENTRY:%.*]] ]
; CHECK-NEXT: ret i8 [[RET]]
;
entry:
; CHECK-NEXT: [[T0:%.*]] = mul nsw i32 [[Z:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[T1:%.*]] = sdiv i32 [[X:%.*]], [[T0]]
; CHECK-NEXT: [[T2:%.*]] = mul nsw i32 [[T0]], [[T1]]
-; CHECK-NEXT: [[T3:%.*]] = sub nsw i32 [[X]], [[T2]]
-; CHECK-NEXT: [[T4:%.*]] = sdiv i32 [[T3]], [[Y]]
+; CHECK-NEXT: [[T3_RECOMPOSED:%.*]] = srem i32 [[X]], [[T0]]
+; CHECK-NEXT: [[T4:%.*]] = sdiv i32 [[T3_RECOMPOSED]], [[Y]]
; CHECK-NEXT: [[T5:%.*]] = mul nsw i32 [[T4]], [[Y]]
-; CHECK-NEXT: [[T6:%.*]] = sub nsw i32 [[T3]], [[T5]]
-; CHECK-NEXT: ret i32 [[T6]]
+; CHECK-NEXT: [[T6_RECOMPOSED:%.*]] = srem i32 [[T3_RECOMPOSED]], [[Y]]
+; CHECK-NEXT: ret i32 [[T6_RECOMPOSED]]
;
%t0 = mul nsw i32 %Z, %Y
%t1 = sdiv i32 %X, %t0