return false;
}
+/// Return the pointer operand of a Load/Store instruction.
+/// Returns nullptr if this is not a valid Load/Store instruction.
+static Value *getPointerOperand(Value *I) {
+ if (LoadInst *LI = dyn_cast<LoadInst>(I))
+ return LI->getPointerOperand();
+ if (StoreInst *SI = dyn_cast<StoreInst>(I))
+ return SI->getPointerOperand();
+ return nullptr;
+}
+
void LoopVectorizationCostModel::collectValuesToIgnore() {
// Ignore ephemeral values.
CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
VecValuesToIgnore.insert(Casts.begin(), Casts.end());
}
- // Ignore induction phis that are only used in either GetElementPtr or ICmp
- // instruction to exit loop. Induction variables usually have large types and
- // can have big impact when estimating register usage.
- // This is for when VF > 1.
+  // Insert uniform instructions into VecValuesToIgnore.
+  // Collect pointers that are neither consecutive nor legal gather/scatter
+  // candidates in NonConsecutivePtr.
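+  // For example, in "c[i] = e[d[i]]" the address of e[d[i]] is neither
+  // consecutive nor, on a target without gather support, legal to vectorize
+  // as a gather, so its pointer is recorded in NonConsecutivePtr.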
+ SmallPtrSet<Instruction *, 8> NonConsecutivePtr;
+ for (auto *BB : TheLoop->getBlocks()) {
+ for (auto &I : *BB) {
+ if (Legal->isUniformAfterVectorization(&I))
+ VecValuesToIgnore.insert(&I);
+ Instruction *PI = dyn_cast_or_null<Instruction>(getPointerOperand(&I));
+ if (PI && !Legal->isConsecutivePtr(PI) &&
+ !isGatherOrScatterLegal(&I, PI, Legal))
+ NonConsecutivePtr.insert(PI);
+ }
+ }
+
+  // Ignore induction phis that are used only by uniform instructions or by
+  // instructions in NonConsecutivePtr.
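+  // For example, in "for (i = 0; i < n; i++) sum += a[i + 3] - b[i + 2];"
+  // the induction phi feeds only address computations that are uniform after
+  // vectorization and the loop-exit compare, so both the phi and its update
+  // are ignored when estimating register usage.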
for (auto &Induction : *Legal->getInductionVars()) {
auto *PN = Induction.first;
auto *UpdateV = PN->getIncomingValueForBlock(TheLoop->getLoopLatch());
- // Check that the PHI is only used by the induction increment (UpdateV) or
- // by GEPs. Then check that UpdateV is only used by a compare instruction,
- // the loop header PHI, or by GEPs.
- // FIXME: Need precise def-use analysis to determine if this instruction
- // variable will be vectorized.
- if (all_of(PN->users(),
- [&](const User *U) -> bool {
- return U == UpdateV || isa<GetElementPtrInst>(U);
- }) &&
- all_of(UpdateV->users(), [&](const User *U) -> bool {
- return U == PN || isa<ICmpInst>(U) || isa<GetElementPtrInst>(U);
- })) {
+  if (all_of(PN->users(),
+             [&](User *U) -> bool {
+               Instruction *UI = dyn_cast<Instruction>(U);
+               return U == UpdateV || !TheLoop->contains(UI) ||
+                      Legal->isUniformAfterVectorization(UI) ||
+                      NonConsecutivePtr.count(UI);
+             }) &&
+      all_of(UpdateV->users(), [&](User *U) -> bool {
+        Instruction *UI = dyn_cast<Instruction>(U);
+        return U == PN || !TheLoop->contains(UI) ||
+               Legal->isUniformAfterVectorization(UI) ||
+               NonConsecutivePtr.count(UI);
+      })) {
VecValuesToIgnore.insert(PN);
VecValuesToIgnore.insert(UpdateV);
}
}
-
- // Ignore instructions that will not be vectorized.
- // This is for when VF > 1.
- for (BasicBlock *BB : TheLoop->blocks()) {
- for (auto &Inst : *BB) {
- switch (Inst.getOpcode())
- case Instruction::GetElementPtr: {
- // Ignore GEP if its last operand is an induction variable so that it is
- // a consecutive load/store and won't be vectorized as scatter/gather
- // pattern.
-
- GetElementPtrInst *Gep = cast<GetElementPtrInst>(&Inst);
- unsigned NumOperands = Gep->getNumOperands();
- unsigned InductionOperand = getGEPInductionOperand(Gep);
- bool GepToIgnore = true;
-
- // Check that all of the gep indices are uniform except for the
- // induction operand.
- for (unsigned i = 0; i != NumOperands; ++i) {
- if (i != InductionOperand &&
- !PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)),
- TheLoop)) {
- GepToIgnore = false;
- break;
- }
- }
-
- if (GepToIgnore)
- VecValuesToIgnore.insert(&Inst);
- break;
- }
- }
- }
}
void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
; CHECK-LABEL: @s173
; CHECK: load <4 x float>, <4 x float>*
-; CHECK: add nsw i64 %1, 16000
+; CHECK: add i64 %index, 16000
; CHECK: ret i32 0
}
; loop.
; CHECK-LABEL: f:
-; CHECK: vmovdqu32 %zmm{{.}}, (
+; CHECK: vmovdqu32 %zmm{{.}},
; CHECK-NOT: %ymm
define void @f(i32* %a, i32 %n) {
-; RUN: opt < %s -debug-only=loop-vectorize -loop-vectorize -vectorizer-maximize-bandwidth -O2 -S 2>&1 | FileCheck %s
+; RUN: opt < %s -debug-only=loop-vectorize -loop-vectorize -vectorizer-maximize-bandwidth -O2 -mtriple=x86_64-unknown-linux -S 2>&1 | FileCheck %s
+; RUN: opt < %s -debug-only=loop-vectorize -loop-vectorize -vectorizer-maximize-bandwidth -O2 -mtriple=x86_64-unknown-linux -mattr=+avx512f -S 2>&1 | FileCheck %s --check-prefix=AVX512F
; REQUIRES: asserts
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
@a = global [1024 x i8] zeroinitializer, align 16
@b = global [1024 x i8] zeroinitializer, align 16
br i1 %exitcond, label %for.cond.cleanup, label %for.body
}
+define i32 @goo() {
+; When indvars.iv is used in a computation chain that only feeds getelementptr
+; or cmp instructions, it has no vector version, so the vector register usage
+; will not exceed the number of available vector registers.
+; CHECK-LABEL: goo
+; CHECK: LV(REG): VF = 4
+; CHECK-NEXT: LV(REG): Found max usage: 4
+; CHECK: LV(REG): VF = 8
+; CHECK-NEXT: LV(REG): Found max usage: 7
+; CHECK: LV(REG): VF = 16
+; CHECK-NEXT: LV(REG): Found max usage: 13
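+;
+; A rough C equivalent of the loop below, reconstructed from the IR (a and b
+; are the i8 arrays declared above):
+;   int s = 0;
+;   for (long i = 0; i < 1024; i++)
+;     s += abs(a[i + 3] - b[i + 2]);
+;   return s;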
+entry:
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ %add.lcssa = phi i32 [ %add, %for.body ]
+ ret i32 %add.lcssa
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %s.015 = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ %tmp1 = add nsw i64 %indvars.iv, 3
+ %arrayidx = getelementptr inbounds [1024 x i8], [1024 x i8]* @a, i64 0, i64 %tmp1
+ %tmp = load i8, i8* %arrayidx, align 1
+ %conv = zext i8 %tmp to i32
+ %tmp2 = add nsw i64 %indvars.iv, 2
+ %arrayidx2 = getelementptr inbounds [1024 x i8], [1024 x i8]* @b, i64 0, i64 %tmp2
+ %tmp3 = load i8, i8* %arrayidx2, align 1
+ %conv3 = zext i8 %tmp3 to i32
+ %sub = sub nsw i32 %conv, %conv3
+ %ispos = icmp sgt i32 %sub, -1
+ %neg = sub nsw i32 0, %sub
+ %tmp4 = select i1 %ispos, i32 %sub, i32 %neg
+ %add = add nsw i32 %tmp4, %s.015
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
define i64 @bar(i64* nocapture %a) {
; CHECK-LABEL: bar
; CHECK: LV(REG): VF = 2
%exitcond = icmp eq i64 %inc, 1024
br i1 %exitcond, label %for.cond.cleanup, label %for.body
}
+
+@d = external global [0 x i64], align 8
+@e = external global [0 x i32], align 4
+@c = external global [0 x i32], align 4
+
+define void @hoo(i32 %n) {
+; For c[i] = e[d[i]] in the loop, e[d[i]] is not consecutive, but it can be
+; vectorized as a gather whose vector of indices %tmp is loaded consecutively.
+; For VF == 16 the vector version of %tmp will be <16 x i64>, so the max usage
+; of AVX512 vector registers will be 2.
+; AVX512F-LABEL: hoo
+; AVX512F: LV(REG): VF = 16
+; AVX512F: LV(REG): Found max usage: 2
+;
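+; A rough C equivalent, reconstructed from the IR:
+;   for (long i = 0; i < 10000; i++)
+;     c[i] = e[d[i]];
+;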
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds [0 x i64], [0 x i64]* @d, i64 0, i64 %indvars.iv
+ %tmp = load i64, i64* %arrayidx, align 8
+ %arrayidx1 = getelementptr inbounds [0 x i32], [0 x i32]* @e, i64 0, i64 %tmp
+ %tmp1 = load i32, i32* %arrayidx1, align 4
+ %arrayidx3 = getelementptr inbounds [0 x i32], [0 x i32]* @c, i64 0, i64 %indvars.iv
+ store i32 %tmp1, i32* %arrayidx3, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 10000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
; }
; CHECK-LABEL: @reverse_forward_induction_i64_i8(
-; CHECK: vector.body
; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-; CHECK: %vec.ind = phi <4 x i64> [ <i64 1023, i64 1022, i64 1021, i64 1020>, %vector.ph ]
-; CHECK: %step.add = add <4 x i64> %vec.ind, <i64 -4, i64 -4, i64 -4, i64 -4>
-; CHECK: trunc i64 %index to i8
+; CHECK: %offset.idx = sub i64 1023, %index
+; CHECK: %[[a0:.+]] = add i64 %offset.idx, 0
+; CHECK: %[[a1:.+]] = add i64 %offset.idx, -1
+; CHECK: %[[a2:.+]] = add i64 %offset.idx, -2
+; CHECK: %[[a3:.+]] = add i64 %offset.idx, -3
+; CHECK: %[[a4:.+]] = add i64 %offset.idx, -4
+; CHECK: %[[a5:.+]] = add i64 %offset.idx, -5
+; CHECK: %[[a6:.+]] = add i64 %offset.idx, -6
+; CHECK: %[[a7:.+]] = add i64 %offset.idx, -7
define void @reverse_forward_induction_i64_i8() {
entry:
}
; CHECK-LABEL: @reverse_forward_induction_i64_i8_signed(
-; CHECK: vector.body:
-; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-; CHECK: %vec.ind = phi <4 x i64> [ <i64 1023, i64 1022, i64 1021, i64 1020>, %vector.ph ]
-; CHECK: %step.add = add <4 x i64> %vec.ind, <i64 -4, i64 -4, i64 -4, i64 -4>
+; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+; CHECK: %offset.idx = sub i64 1023, %index
+; CHECK: %[[a0:.+]] = add i64 %offset.idx, 0
+; CHECK: %[[a1:.+]] = add i64 %offset.idx, -1
+; CHECK: %[[a2:.+]] = add i64 %offset.idx, -2
+; CHECK: %[[a3:.+]] = add i64 %offset.idx, -3
+; CHECK: %[[a4:.+]] = add i64 %offset.idx, -4
+; CHECK: %[[a5:.+]] = add i64 %offset.idx, -5
+; CHECK: %[[a6:.+]] = add i64 %offset.idx, -6
+; CHECK: %[[a7:.+]] = add i64 %offset.idx, -7
define void @reverse_forward_induction_i64_i8_signed() {
entry: