// Collect all load and store ops in loop nest rooted at 'forOp'.
SmallVector<Operation *, 8> loadAndStoreOpInsts;
forOp.getOperation()->walk([&](Operation *opInst) {
- if (isa<AffineReadOpInterface>(opInst) ||
- isa<AffineWriteOpInterface>(opInst))
+ if (isa<AffineReadOpInterface, AffineWriteOpInterface>(opInst))
loadAndStoreOpInsts.push_back(opInst);
});
// No vectorization across unknown regions.
auto regions = matcher::Op([](Operation &op) -> bool {
- return op.getNumRegions() != 0 &&
- !(isa<AffineIfOp>(op) || isa<AffineForOp>(op));
+ return op.getNumRegions() != 0 && !isa<AffineIfOp, AffineForOp>(op);
});
SmallVector<NestedMatch, 8> regionsMatched;
regions.match(forOp, &regionsMatched);
}
bool isLoadOrStore(Operation &op) {
- return isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op);
+ return isa<AffineLoadOp, AffineStoreOp>(op);
}
} // end namespace matcher
if (!op)
return;
- assert((op->getNumRegions() == 0 || isa<AffineForOp>(op) ||
- isa<scf::ForOp>(op)) &&
+ assert((op->getNumRegions() == 0 || isa<AffineForOp, scf::ForOp>(op)) &&
"unexpected generic op with regions");
// Evaluate whether we should keep this def.
LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
ComputationSliceState *sliceState,
bool addMemRefDimBounds) {
- assert((isa<AffineReadOpInterface>(op) || isa<AffineWriteOpInterface>(op)) &&
+ assert((isa<AffineReadOpInterface, AffineWriteOpInterface>(op)) &&
"affine read/write op expected");
MemRefAccess access(op);
// This value has to be a block argument for an op that has the
// `AffineScope` trait or for an affine.for or affine.parallel.
auto *parentOp = value.cast<BlockArgument>().getOwner()->getParentOp();
- return parentOp &&
- (parentOp->hasTrait<OpTrait::AffineScope>() ||
- isa<AffineForOp>(parentOp) || isa<AffineParallelOp>(parentOp));
+ return parentOp && (parentOp->hasTrait<OpTrait::AffineScope>() ||
+ isa<AffineForOp, AffineParallelOp>(parentOp));
}
// Value can be used as a dimension id iff it meets one of the following
// This value has to be a block argument for an affine.for or an
// affine.parallel.
auto *parentOp = value.cast<BlockArgument>().getOwner()->getParentOp();
- return isa<AffineForOp>(parentOp) || isa<AffineParallelOp>(parentOp);
+ return isa<AffineForOp, AffineParallelOp>(parentOp);
}
// Affine apply operation is ok if all of its operands are ok.
// Get to the first load, store, or for op (that is not a copy nest itself).
auto curBegin =
std::find_if(block->begin(), block->end(), [&](Operation &op) {
- return (isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op) ||
- isa<AffineForOp>(op)) &&
+ return isa<AffineLoadOp, AffineStoreOp, AffineForOp>(op) &&
copyNests.count(&op) == 0;
});
}
// Get to the next load or store op after 'forOp'.
curBegin = std::find_if(std::next(it), block->end(), [&](Operation &op) {
- return (isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op) ||
- isa<AffineForOp>(op)) &&
+ return isa<AffineLoadOp, AffineStoreOp, AffineForOp>(op) &&
copyNests.count(&op) == 0;
});
it = curBegin;
static bool isMemRefDereferencingOp(Operation &op) {
// TODO(asabne): Support DMA Ops.
- if (isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op)) {
- return true;
- }
- return false;
+ return isa<AffineLoadOp, AffineStoreOp>(op);
}
// Returns true if the individual op is loop invariant.
// The simplification of the attribute will likely simplify the op. Try to
// fold / apply canonicalization patterns when we have affine dialect ops.
- if (isa<AffineForOp>(op) || isa<AffineIfOp>(op) || isa<AffineApplyOp>(op))
+ if (isa<AffineForOp, AffineIfOp, AffineApplyOp>(op))
applyOpPatternsAndFold(op, patterns);
});
static NestedPattern &vectorTransferPattern() {
static auto pattern = matcher::Op([](Operation &op) {
- return isa<vector::TransferReadOp>(op) || isa<vector::TransferWriteOp>(op);
+ return isa<vector::TransferReadOp, vector::TransferWriteOp>(op);
});
return pattern;
}
}
static bool isSinkingBeneficiary(Operation *op) {
- return isa<ConstantOp>(op) || isa<DimOp>(op);
+ return isa<ConstantOp, DimOp>(op);
}
LogicalResult mlir::sinkOperationsIntoLaunchOp(gpu::LaunchOp launchOp) {
while (changed) {
changed = false;
func.walk([&changed](Operation *op) {
- if (!isa<AllocOp>(op) && !isa<AllocaOp>(op) && !isa<DeallocOp>(op))
+ if (!isa<AllocOp, AllocaOp, DeallocOp>(op))
return;
LLVM_DEBUG(DBGS() << "Candidate for hoisting: " << *op << "\n");
v = op->getResult(0);
}
if (v && !llvm::all_of(v.getUses(), [&](OpOperand &operand) {
- return isa<ViewLikeOpInterface>(operand.getOwner()) ||
- isa<DeallocOp>(operand.getOwner());
+ return isa<ViewLikeOpInterface, DeallocOp>(operand.getOwner());
})) {
LLVM_DEBUG(DBGS() << "Found non view-like or dealloc use: bail\n");
return;
}
// Move AllocOp before the loop.
- if (isa<AllocOp>(op) || isa<AllocaOp>(op))
+ if (isa<AllocOp, AllocaOp>(op))
loop.moveOutOfLoop({op});
else // Move DeallocOp outside of the loop.
op->moveAfter(loop);
if (interchangeVector.empty())
return failure();
// Transformation applies to generic ops only.
- if (!isa<GenericOp>(op) && !isa<IndexedGenericOp>(op))
+ if (!isa<GenericOp, IndexedGenericOp>(op))
return failure();
LinalgOp linOp = cast<LinalgOp>(op);
// Transformation applies to buffers only.
for (Type outputTensorType : linalgOp.getOutputTensorTypes())
if (!outputTensorType.cast<ShapedType>().hasStaticShape())
return failure();
- if (isa<linalg::MatmulOp>(op) || isa<linalg::FillOp>(op))
+ if (isa<linalg::MatmulOp, linalg::FillOp>(op))
return success();
auto genericOp = dyn_cast<linalg::GenericOp>(op);
auto results = parentOp->getResults();
auto operands = op.getOperands();
- if (isa<IfOp>(parentOp) || isa<ForOp>(parentOp)) {
+ if (isa<IfOp, ForOp>(parentOp)) {
if (parentOp->getNumResults() != op.getNumOperands())
return op.emitOpError() << "parent of yield must have same number of "
"results as the yield operands";
static inline bool containsReturn(Region &region) {
return llvm::any_of(region, [](Block &block) {
Operation *terminator = block.getTerminator();
- return isa<spirv::ReturnOp>(terminator) ||
- isa<spirv::ReturnValueOp>(terminator);
+ return isa<spirv::ReturnOp, spirv::ReturnValueOp>(terminator);
});
}
// Return true here when inlining into spv.func, spv.selection, and
// spv.loop operations.
auto *op = dest->getParentOp();
- return isa<spirv::FuncOp>(op) || isa<spirv::SelectionOp>(op) ||
- isa<spirv::LoopOp>(op);
+ return isa<spirv::FuncOp, spirv::SelectionOp, spirv::LoopOp>(op);
}
/// Returns true if the given operation 'op', that is registered to this
bool isLegalToInline(Operation *op, Region *dest,
BlockAndValueMapping &) const final {
// TODO(antiagainst): Enable inlining structured control flows with return.
- if ((isa<spirv::SelectionOp>(op) || isa<spirv::LoopOp>(op)) &&
+ if ((isa<spirv::SelectionOp, spirv::LoopOp>(op)) &&
containsReturn(op->getRegion(0)))
return false;
// TODO(antiagainst): we need to filter OpKill here to avoid inlining it to
//===----------------------------------------------------------------------===//
bool MemoryEffects::Effect::classof(const SideEffects::Effect *effect) {
- return isa<Allocate>(effect) || isa<Free>(effect) || isa<Read>(effect) ||
- isa<Write>(effect);
+ return isa<Allocate, Free, Read, Write>(effect);
}
//===----------------------------------------------------------------------===//
/// Returns true if the given pass is hidden from IR printing.
static bool isHiddenPass(Pass *pass) {
- return isa<OpToOpPassAdaptor>(pass) || isa<VerifierPass>(pass);
+ return isa<OpToOpPassAdaptor, VerifierPass>(pass);
}
static void printIR(Operation *op, bool printModuleScope, raw_ostream &out,
static StringRef getAsStringOrEmpty(const llvm::Record &record,
StringRef fieldName) {
if (auto valueInit = record.getValueInit(fieldName)) {
- if (llvm::isa<llvm::CodeInit>(valueInit) ||
- llvm::isa<llvm::StringInit>(valueInit))
+ if (llvm::isa<llvm::CodeInit, llvm::StringInit>(valueInit))
return record.getValueAsString(fieldName);
}
return "";
bool tblgen::Operator::hasAssemblyFormat() const {
auto *valueInit = def.getValueInit("assemblyFormat");
- return isa<llvm::CodeInit>(valueInit) || isa<llvm::StringInit>(valueInit);
+ return isa<llvm::CodeInit, llvm::StringInit>(valueInit);
}
StringRef tblgen::Operator::getAssemblyFormat() const {
}
bool tblgen::DagLeaf::isStringAttr() const {
- return isa<llvm::StringInit>(def) || isa<llvm::CodeInit>(def);
+ return isa<llvm::StringInit, llvm::CodeInit>(def);
}
tblgen::Constraint tblgen::DagLeaf::getAsConstraint() const {
/// Globals are inserted before the first function, if any.
Block::iterator getGlobalInsertPt() {
auto i = module.getBody()->begin();
- while (!isa<LLVMFuncOp>(i) && !isa<ModuleTerminatorOp>(i))
+ while (!isa<LLVMFuncOp, ModuleTerminatorOp>(i))
++i;
return i;
}
// another sequence type. The recursion terminates because each step removes
// one outer sequential type.
bool elementTypeSequential =
- isa<llvm::ArrayType>(elementType) || isa<llvm::VectorType>(elementType);
+ isa<llvm::ArrayType, llvm::VectorType>(elementType);
llvm::Constant *child = getLLVMConstant(
elementType,
elementTypeSequential ? splatAttr : splatAttr.getSplatValue(), loc);
// TODO(b/117228571) Replace when this is modeled through side-effects/op traits
static bool isMemRefDereferencingOp(Operation &op) {
- if (isa<AffineReadOpInterface>(op) || isa<AffineWriteOpInterface>(op) ||
- isa<AffineDmaStartOp>(op) || isa<AffineDmaWaitOp>(op))
- return true;
- return false;
+ return isa<AffineReadOpInterface, AffineWriteOpInterface, AffineDmaStartOp,
+ AffineDmaWaitOp>(op);
}
namespace {
// could still erase it if the call had no side-effects.
continue;
if (llvm::any_of(memref.getUsers(), [&](Operation *ownerOp) {
- return (!isa<AffineStoreOp>(ownerOp) && !isa<DeallocOp>(ownerOp));
+ return !isa<AffineStoreOp, DeallocOp>(ownerOp);
}))
continue;
// Temporary utility: will be replaced when DmaStart/DmaFinish abstract op's are
// added. TODO(b/117228571)
static unsigned getTagMemRefPos(Operation &dmaOp) {
- assert(isa<AffineDmaStartOp>(dmaOp) || isa<AffineDmaWaitOp>(dmaOp));
+ assert((isa<AffineDmaStartOp, AffineDmaWaitOp>(dmaOp)));
if (auto dmaStartOp = dyn_cast<AffineDmaStartOp>(dmaOp)) {
return dmaStartOp.getTagMemRefOperandIndex();
}
it != Block::reverse_iterator(opA); ++it) {
Operation *opX = &(*it);
opX->walk([&](Operation *op) {
- if (isa<AffineReadOpInterface>(op) || isa<AffineWriteOpInterface>(op)) {
+ if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op)) {
if (isDependentLoadOrStoreOp(op, values)) {
lastDepOp = opX;
return WalkResult::interrupt();
SmallVectorImpl<Operation *> &loadAndStoreOps) {
bool hasIfOp = false;
forOp.walk([&](Operation *op) {
- if (isa<AffineReadOpInterface>(op) || isa<AffineWriteOpInterface>(op))
+ if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op))
loadAndStoreOps.push_back(op);
else if (isa<AffineIfOp>(op))
hasIfOp = true;
// Temporary utility: will be replaced when this is modeled through
// side-effects/op traits. TODO(b/117228571)
static bool isMemRefDereferencingOp(Operation &op) {
- if (isa<AffineReadOpInterface>(op) || isa<AffineWriteOpInterface>(op) ||
- isa<AffineDmaStartOp>(op) || isa<AffineDmaWaitOp>(op))
- return true;
- return false;
+ return isa<AffineReadOpInterface, AffineWriteOpInterface, AffineDmaStartOp,
+ AffineDmaWaitOp>(op);
}
/// Return the AffineMapAttr associated with memory 'op' on 'memref'.
static inline bool hasStringAttribute(const Record &record,
StringRef fieldName) {
auto valueInit = record.getValueInit(fieldName);
- return isa<CodeInit>(valueInit) || isa<StringInit>(valueInit);
+ return isa<CodeInit, StringInit>(valueInit);
}
static std::string getArgumentName(const Operator &op, int index) {