const LoopAccessInfo *LAI) const;
/// Query the target whether lowering of the llvm.get.active.lane.mask
- /// intrinsic is supported and if emitting it is desired for this loop.
- bool emitGetActiveLaneMask(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
- bool TailFolded) const;
+ /// intrinsic is supported.
+ bool emitGetActiveLaneMask() const;
/// @}
preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
AssumptionCache &AC, TargetLibraryInfo *TLI,
DominatorTree *DT, const LoopAccessInfo *LAI) = 0;
- virtual bool emitGetActiveLaneMask(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
- bool TailFolded) = 0;
+ virtual bool emitGetActiveLaneMask() = 0;
virtual bool isLegalAddImmediate(int64_t Imm) = 0;
virtual bool isLegalICmpImmediate(int64_t Imm) = 0;
virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
const LoopAccessInfo *LAI) override {
return Impl.preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LAI);
}
- bool emitGetActiveLaneMask(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
- bool TailFolded) override {
- return Impl.emitGetActiveLaneMask(L, LI, SE, TailFolded);
+ bool emitGetActiveLaneMask() override {
+ return Impl.emitGetActiveLaneMask();
}
bool isLegalAddImmediate(int64_t Imm) override {
return Impl.isLegalAddImmediate(Imm);
return false;
}
- bool emitGetActiveLaneMask(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
- bool TailFold) const {
+ bool emitGetActiveLaneMask() const {
return false;
}
return BaseT::preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LAI);
}
- bool emitGetActiveLaneMask(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
- bool TailFold) {
- return BaseT::emitGetActiveLaneMask(L, LI, SE, TailFold);
+ bool emitGetActiveLaneMask() {
+ return BaseT::emitGetActiveLaneMask();
}
int getInstructionLatency(const Instruction *I) {
return TTIImpl->preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LAI);
}
-bool TargetTransformInfo::emitGetActiveLaneMask(Loop *L, LoopInfo *LI,
- ScalarEvolution &SE, bool TailFolded) const {
- return TTIImpl->emitGetActiveLaneMask(L, LI, SE, TailFolded);
+bool TargetTransformInfo::emitGetActiveLaneMask() const {
+ return TTIImpl->emitGetActiveLaneMask();
}
void TargetTransformInfo::getUnrollingPreferences(
return canTailPredicateLoop(L, LI, SE, DL, LAI);
}
-bool ARMTTIImpl::emitGetActiveLaneMask(Loop *L, LoopInfo *LI,
- ScalarEvolution &SE, bool TailFolded) const {
- // TODO: if this loop is tail-folded, we want to emit the
- // llvm.get.active.lane.mask intrinsic so that this can be picked up in the
- // MVETailPredication pass that needs to know the number of elements
- // processed by this vector loop.
+bool ARMTTIImpl::emitGetActiveLaneMask() const {
+ if (!ST->hasMVEIntegerOps())
+ return false;
+
+  // TODO: Intrinsic @llvm.get.active.lane.mask is supported, so this should
+  // eventually return true. It is used in the MVETailPredication pass, which
+  // requires the number of elements processed by this vector loop to set up
+  // the tail-predicated loop.
return false;
}
void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
TTI::UnrollingPreferences &UP);
- bool emitGetActiveLaneMask(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
- bool TailFolded) const;
+ bool emitGetActiveLaneMask() const;
bool shouldBuildLookupTablesForConstant(Constant *C) const {
// In the ROPI and RWPI relocation models we can't have pointers to global