// =============================================================================
//
// This file defines convenience types for working with Affine operations
-// in the MLIR instruction set.
+// in the MLIR operation set.
//
//===----------------------------------------------------------------------===//
void extractForInductionVars(ArrayRef<AffineForOp> forInsts,
SmallVectorImpl<Value *> *ivs);
-/// AffineBound represents a lower or upper bound in the for instruction.
+/// AffineBound represents a lower or upper bound in the for operation.
/// This class does not own the underlying operands. Instead, it refers
/// to the operands stored in the AffineForOp. Its life span should not exceed
-/// that of the for instruction it refers to.
+/// that of the for operation it refers to.
class AffineBound {
public:
- AffineForOp getAffineForOp() { return inst; }
+ AffineForOp getAffineForOp() { return op; }
AffineMap getMap() { return map; }
/// Returns an AffineValueMap representing this bound.
unsigned getNumOperands() { return opEnd - opStart; }
Value *getOperand(unsigned idx) {
- return inst.getOperation()->getOperand(opStart + idx);
+ return op.getOperation()->getOperand(opStart + idx);
}
using operand_iterator = AffineForOp::operand_iterator;
using operand_range = AffineForOp::operand_range;
- operand_iterator operand_begin() { return inst.operand_begin() + opStart; }
- operand_iterator operand_end() { return inst.operand_begin() + opEnd; }
+ operand_iterator operand_begin() { return op.operand_begin() + opStart; }
+ operand_iterator operand_end() { return op.operand_begin() + opEnd; }
operand_range getOperands() { return {operand_begin(), operand_end()}; }
private:
- // 'affine.for' instruction that contains this bound.
- AffineForOp inst;
+ // 'affine.for' operation that contains this bound.
+ AffineForOp op;
// Start and end positions of this affine bound operands in the list of
- // the containing 'affine.for' instruction operands.
+ // the containing 'affine.for' operation operands.
unsigned opStart, opEnd;
// Affine map for this bound.
AffineMap map;
- AffineBound(AffineForOp inst, unsigned opStart, unsigned opEnd, AffineMap map)
- : inst(inst), opStart(opStart), opEnd(opEnd), map(map) {}
+ AffineBound(AffineForOp op, unsigned opStart, unsigned opEnd, AffineMap map)
+ : op(op), opStart(opStart), opEnd(opEnd), map(map) {}
friend class AffineForOp;
};
static StringRef getOperationName() { return "affine.terminator"; }
private:
- friend Instruction;
+ friend Operation;
};
/// Returns true if the given Value can be used as a dimension id.
bool isVectorizableLoopBodyAlongFastestVaryingMemRefDim(
AffineForOp loop, unsigned fastestVaryingDim);
-/// Checks where SSA dominance would be violated if a for inst's body
+/// Checks whether SSA dominance would be violated if a for op's body
/// operations are shifted by the specified shifts. This method checks if a
/// 'def' and all its uses have the same shift factor.
// TODO(mlir-team): extend this to check for memory-based dependence
// corresponding trait classes. This avoids them being template
// instantiated/duplicated.
namespace impl {
-bool verifyCompatibleOperandBroadcast(Instruction *op);
+bool verifyCompatibleOperandBroadcast(Operation *op);
} // namespace impl
namespace util {
class BroadcastableTwoOperandsOneResult
: public TraitBase<ConcreteType, BroadcastableTwoOperandsOneResult> {
public:
- static bool verifyTrait(Instruction *op) {
+ static bool verifyTrait(Operation *op) {
return impl::verifyCompatibleOperandBroadcast(op);
}
};
static Location getLocation();
private:
- /// Only NestedBuilder (which is used to create an instruction with a body)
+ /// Only NestedBuilder (which is used to create an operation with a body)
/// may access private members in order to implement scoping.
friend class NestedBuilder;
return *this;
}
- /// Enter an mlir::Block and setup a ScopedContext to insert instructions at
+ /// Enter an mlir::Block and setup a ScopedContext to insert operations at
/// the end of it. Since we cannot use c++ language-level scoping to implement
- /// scoping itself, we use enter/exit pairs of instructions.
+ /// scoping itself, we use enter/exit pairs of operations.
/// As a consequence we must allocate a new FuncBuilder + ScopedContext and
/// let them escape.
/// Step back "prev" times from the end of the block to set up the insertion
ScopedContext *bodyScope = nullptr;
};
-/// A LoopBuilder is a generic NestedBuilder for loop-like MLIR instructions.
+/// A LoopBuilder is a generic NestedBuilder for loop-like MLIR operations.
/// More specifically it is meant to be used as a temporary object for
/// representing any nested MLIR construct that is "related to" an mlir::Value*
/// (for now an induction variable).
BlockBuilder &operator=(BlockBuilder &other) = delete;
};
-/// Base class for ValueHandle, InstructionHandle and BlockHandle.
+/// Base class for ValueHandle, OperationHandle and BlockHandle.
/// Not meant to be used outside of these classes.
struct CapturableHandle {
protected:
/// 3. constructed state,in which case it holds a Value.
///
/// A ValueHandle is meant to capture a single Value* and should be used for
-/// instructions that have a single result. For convenience of use, we also
+/// operations that have a single result. For convenience of use, we also
/// include AffineForOp in this category although it does not return a value.
/// In the case of AffineForOp, the captured Value* is the loop induction
/// variable.
static ValueHandle createComposedAffineApply(AffineMap map,
ArrayRef<Value *> operands);
- /// Generic create for a named instruction producing a single value.
+ /// Generic create for a named operation producing a single value.
static ValueHandle create(StringRef name, ArrayRef<ValueHandle> operands,
ArrayRef<Type> resultTypes,
ArrayRef<NamedAttribute> attributes = {});
bool hasType() const { return t != Type(); }
Type getType() const { return t; }
- Instruction *getOperation() const {
+ Operation *getOperation() const {
if (!v)
return nullptr;
return v->getDefiningOp();
Value *v;
};
-/// An InstructionHandle can be used in lieu of ValueHandle to capture the
-/// instruction in cases when one does not care about, or cannot extract, a
-/// unique Value* from the instruction.
-/// This can be used for capturing zero result instructions as well as
-/// multi-result instructions that are not supported by ValueHandle.
-/// We do not distinguish further between zero and multi-result instructions at
+/// An OperationHandle can be used in lieu of ValueHandle to capture the
+/// operation in cases when one does not care about, or cannot extract, a
+/// unique Value* from the operation.
+/// This can be used for capturing zero result operations as well as
+/// multi-result operations that are not supported by ValueHandle.
+/// We do not distinguish further between zero and multi-result operations at
/// this time.
-struct InstructionHandle : public CapturableHandle {
- InstructionHandle() : inst(nullptr) {}
- InstructionHandle(Instruction *inst) : inst(inst) {}
+struct OperationHandle : public CapturableHandle {
+ OperationHandle() : op(nullptr) {}
+ OperationHandle(Operation *op) : op(op) {}
- InstructionHandle(const InstructionHandle &) = default;
- InstructionHandle &operator=(const InstructionHandle &) = default;
+ OperationHandle(const OperationHandle &) = default;
+ OperationHandle &operator=(const OperationHandle &) = default;
/// Generic mlir::Op create. This is the key to being extensible to the whole
/// of MLIR without duplicating the type system or the op definitions.
template <typename Op, typename... Args>
- static InstructionHandle create(Args... args);
+ static OperationHandle create(Args... args);
- /// Generic create for a named instruction.
- static InstructionHandle create(StringRef name,
- ArrayRef<ValueHandle> operands,
- ArrayRef<Type> resultTypes,
- ArrayRef<NamedAttribute> attributes = {});
+ /// Generic create for a named operation.
+ static OperationHandle create(StringRef name, ArrayRef<ValueHandle> operands,
+ ArrayRef<Type> resultTypes,
+ ArrayRef<NamedAttribute> attributes = {});
- operator Instruction *() { return inst; }
- Instruction *getOperation() const { return inst; }
+ operator Operation *() { return op; }
+ Operation *getOperation() const { return op; }
private:
- Instruction *inst;
+ Operation *op;
};
-/// Simple wrapper to build a generic instruction without successor blocks.
-template <typename HandleType> struct CustomInstruction {
- CustomInstruction(StringRef name) : name(name) {
+/// Simple wrapper to build a generic operation without successor blocks.
+template <typename HandleType> struct CustomOperation {
+ CustomOperation(StringRef name) : name(name) {
static_assert(std::is_same<HandleType, ValueHandle>() ||
- std::is_same<HandleType, InstructionHandle>(),
- "Only CustomInstruction<ValueHandle> or "
- "CustomInstruction<InstructionHandle> can be constructed.");
+ std::is_same<HandleType, OperationHandle>(),
+ "Only CustomOperation<ValueHandle> or "
+ "CustomOperation<OperationHandle> can be constructed.");
}
HandleType operator()(ArrayRef<ValueHandle> operands = {},
ArrayRef<Type> resultTypes = {},
};
template <typename Op, typename... Args>
-InstructionHandle InstructionHandle::create(Args... args) {
- return InstructionHandle(
- ScopedContext::getBuilder()
- ->create<Op>(ScopedContext::getLocation(), args...)
- .getOperation());
+OperationHandle OperationHandle::create(Args... args) {
+ return OperationHandle(ScopedContext::getBuilder()
+ ->create<Op>(ScopedContext::getLocation(), args...)
+ .getOperation());
}
template <typename Op, typename... Args>
ValueHandle ValueHandle::create(Args... args) {
- Instruction *inst = ScopedContext::getBuilder()
- ->create<Op>(ScopedContext::getLocation(), args...)
- .getOperation();
- if (inst->getNumResults() == 1) {
- return ValueHandle(inst->getResult(0));
- } else if (inst->getNumResults() == 0) {
- if (auto f = inst->dyn_cast<AffineForOp>()) {
+ Operation *op = ScopedContext::getBuilder()
+ ->create<Op>(ScopedContext::getLocation(), args...)
+ .getOperation();
+ if (op->getNumResults() == 1) {
+ return ValueHandle(op->getResult(0));
+ } else if (op->getNumResults() == 0) {
+ if (auto f = op->dyn_cast<AffineForOp>()) {
return ValueHandle(f.getInductionVar());
}
}
- llvm_unreachable("unsupported instruction, use an InstructionHandle instead");
+ llvm_unreachable("unsupported operation, use an OperationHandle instead");
}
namespace op {
/// Emits a `store`.
// NOLINTNEXTLINE: unconventional-assign-operator
- InstructionHandle operator=(const TemplatedIndexedValue &rhs) {
+ OperationHandle operator=(const TemplatedIndexedValue &rhs) {
ValueHandle rrhs(rhs);
return Store(rrhs, getBase(), {indices.begin(), indices.end()});
}
// NOLINTNEXTLINE: unconventional-assign-operator
- InstructionHandle operator=(ValueHandle rhs) {
+ OperationHandle operator=(ValueHandle rhs) {
return Store(rhs, getBase(), {indices.begin(), indices.end()});
}
ValueHandle operator-(ValueHandle e);
ValueHandle operator*(ValueHandle e);
ValueHandle operator/(ValueHandle e);
- InstructionHandle operator+=(ValueHandle e);
- InstructionHandle operator-=(ValueHandle e);
- InstructionHandle operator*=(ValueHandle e);
- InstructionHandle operator/=(ValueHandle e);
+ OperationHandle operator+=(ValueHandle e);
+ OperationHandle operator-=(ValueHandle e);
+ OperationHandle operator*=(ValueHandle e);
+ OperationHandle operator/=(ValueHandle e);
ValueHandle operator+(TemplatedIndexedValue e) {
return *this + static_cast<ValueHandle>(e);
}
ValueHandle operator/(TemplatedIndexedValue e) {
return *this / static_cast<ValueHandle>(e);
}
- InstructionHandle operator+=(TemplatedIndexedValue e) {
+ OperationHandle operator+=(TemplatedIndexedValue e) {
return this->operator+=(static_cast<ValueHandle>(e));
}
- InstructionHandle operator-=(TemplatedIndexedValue e) {
+ OperationHandle operator-=(TemplatedIndexedValue e) {
return this->operator-=(static_cast<ValueHandle>(e));
}
- InstructionHandle operator*=(TemplatedIndexedValue e) {
+ OperationHandle operator*=(TemplatedIndexedValue e) {
return this->operator*=(static_cast<ValueHandle>(e));
}
- InstructionHandle operator/=(TemplatedIndexedValue e) {
+ OperationHandle operator/=(TemplatedIndexedValue e) {
return this->operator/=(static_cast<ValueHandle>(e));
}
}
template <typename Load, typename Store>
-InstructionHandle
-TemplatedIndexedValue<Load, Store>::operator+=(ValueHandle e) {
+OperationHandle TemplatedIndexedValue<Load, Store>::operator+=(ValueHandle e) {
using op::operator+;
return Store(*this + e, getBase(), {indices.begin(), indices.end()});
}
template <typename Load, typename Store>
-InstructionHandle
-TemplatedIndexedValue<Load, Store>::operator-=(ValueHandle e) {
+OperationHandle TemplatedIndexedValue<Load, Store>::operator-=(ValueHandle e) {
using op::operator-;
return Store(*this - e, getBase(), {indices.begin(), indices.end()});
}
template <typename Load, typename Store>
-InstructionHandle
-TemplatedIndexedValue<Load, Store>::operator*=(ValueHandle e) {
+OperationHandle TemplatedIndexedValue<Load, Store>::operator*=(ValueHandle e) {
using op::operator*;
return Store(*this * e, getBase(), {indices.begin(), indices.end()});
}
template <typename Load, typename Store>
-InstructionHandle
-TemplatedIndexedValue<Load, Store>::operator/=(ValueHandle e) {
+OperationHandle TemplatedIndexedValue<Load, Store>::operator/=(ValueHandle e) {
using op::operator/;
return Store(*this / e, getBase(), {indices.begin(), indices.end()});
}
};
/// Provides a set of first class intrinsics.
-/// In the future, most of intrinsics reated to Instruction that don't contain
-/// other instructions should be Tablegen'd.
+/// In the future, most of intrinsics related to Operation that don't contain
+/// other operations should be Tablegen'd.
namespace intrinsics {
namespace detail {
-/// Helper structure to be used with ValueBuilder / InstructionBuilder.
+/// Helper structure to be used with ValueBuilder / OperationBuilder.
/// It serves the purpose of removing boilerplate specialization for the sole
/// purpose of implicitly converting ArrayRef<ValueHandle> -> ArrayRef<Value*>.
class ValueHandleArray {
ValueBuilder() : ValueHandle(ValueHandle::create<Op>()) {}
};
-template <typename Op> struct InstructionBuilder : public InstructionHandle {
+template <typename Op> struct OperationBuilder : public OperationHandle {
template <typename... Args>
- InstructionBuilder(Args... args)
- : InstructionHandle(
- InstructionHandle::create<Op>(detail::unpack(args)...)) {}
- InstructionBuilder(ArrayRef<ValueHandle> vs)
- : InstructionHandle(InstructionHandle::create<Op>(detail::unpack(vs))) {}
+ OperationBuilder(Args... args)
+ : OperationHandle(OperationHandle::create<Op>(detail::unpack(args)...)) {}
+ OperationBuilder(ArrayRef<ValueHandle> vs)
+ : OperationHandle(OperationHandle::create<Op>(detail::unpack(vs))) {}
template <typename... Args>
- InstructionBuilder(ArrayRef<ValueHandle> vs, Args... args)
- : InstructionHandle(InstructionHandle::create<Op>(
- detail::unpack(vs), detail::unpack(args)...)) {}
+ OperationBuilder(ArrayRef<ValueHandle> vs, Args... args)
+ : OperationHandle(OperationHandle::create<Op>(detail::unpack(vs),
+ detail::unpack(args)...)) {}
template <typename T, typename... Args>
- InstructionBuilder(T t, ArrayRef<ValueHandle> vs, Args... args)
- : InstructionHandle(InstructionHandle::create<Op>(
+ OperationBuilder(T t, ArrayRef<ValueHandle> vs, Args... args)
+ : OperationHandle(OperationHandle::create<Op>(
detail::unpack(t), detail::unpack(vs), detail::unpack(args)...)) {}
template <typename T1, typename T2, typename... Args>
- InstructionBuilder(T1 t1, T2 t2, ArrayRef<ValueHandle> vs, Args... args)
- : InstructionHandle(InstructionHandle::create<Op>(
+ OperationBuilder(T1 t1, T2 t2, ArrayRef<ValueHandle> vs, Args... args)
+ : OperationHandle(OperationHandle::create<Op>(
detail::unpack(t1), detail::unpack(t2), detail::unpack(vs),
detail::unpack(args)...)) {}
- InstructionBuilder() : InstructionHandle(InstructionHandle::create<Op>()) {}
+ OperationBuilder() : OperationHandle(OperationHandle::create<Op>()) {}
};
using alloc = ValueBuilder<AllocOp>;
using constant_float = ValueBuilder<ConstantFloatOp>;
using constant_index = ValueBuilder<ConstantIndexOp>;
using constant_int = ValueBuilder<ConstantIntOp>;
-using dealloc = InstructionBuilder<DeallocOp>;
+using dealloc = OperationBuilder<DeallocOp>;
using load = ValueBuilder<LoadOp>;
-using ret = InstructionBuilder<ReturnOp>;
+using ret = OperationBuilder<ReturnOp>;
using select = ValueBuilder<SelectOp>;
-using store = InstructionBuilder<StoreOp>;
+using store = OperationBuilder<StoreOp>;
using vector_type_cast = ValueBuilder<VectorTypeCastOp>;
/// Branches into the mlir::Block* captured by BlockHandle `b` with `operands`.
///
/// Prerequisites:
/// All Handles have already captured previously constructed IR objects.
-InstructionHandle br(BlockHandle bh, ArrayRef<ValueHandle> operands);
+OperationHandle br(BlockHandle bh, ArrayRef<ValueHandle> operands);
/// Creates a new mlir::Block* and branches to it from the current block.
/// Argument types are specified by `operands`.
/// Captures the new block in `bh` and the actual `operands` in `captures`. To
/// insert the new mlir::Block*, a local ScopedContext is constructed and
-/// released to the current block. The branch instruction is then added to the
+/// released to the current block. The branch operation is then added to the
/// new block.
///
/// Prerequisites:
/// All `operands` have already captured an mlir::Value*
/// captures.size() == operands.size()
/// captures and operands are pairwise of the same type.
-InstructionHandle br(BlockHandle *bh, ArrayRef<ValueHandle *> captures,
- ArrayRef<ValueHandle> operands);
+OperationHandle br(BlockHandle *bh, ArrayRef<ValueHandle *> captures,
+ ArrayRef<ValueHandle> operands);
/// Branches into the mlir::Block* captured by BlockHandle `trueBranch` with
/// `trueOperands` if `cond` evaluates to `true` (resp. `falseBranch` and
///
/// Prerequisites:
/// All Handles have captured previously constructed IR objects.
-InstructionHandle cond_br(ValueHandle cond, BlockHandle trueBranch,
- ArrayRef<ValueHandle> trueOperands,
- BlockHandle falseBranch,
- ArrayRef<ValueHandle> falseOperands);
+OperationHandle cond_br(ValueHandle cond, BlockHandle trueBranch,
+ ArrayRef<ValueHandle> trueOperands,
+ BlockHandle falseBranch,
+ ArrayRef<ValueHandle> falseOperands);
/// Eagerly creates new mlir::Block* with argument types specified by
/// `trueOperands`/`falseOperands`.
/// Captures the new blocks in `trueBranch`/`falseBranch` and the arguments in
/// `trueCaptures/falseCaptures`.
/// To insert the new mlir::Block*, a local ScopedContext is constructed and
-/// released. The branch instruction is then added in the original location and
+/// released. The branch operation is then added in the original location and
/// targeting the eagerly constructed blocks.
///
/// Prerequisites:
/// `falseCaptures`.size() == `falseOperands`.size()
/// `trueCaptures` and `trueOperands` are pairwise of the same type
/// `falseCaptures` and `falseOperands` are pairwise of the same type.
-InstructionHandle cond_br(ValueHandle cond, BlockHandle *trueBranch,
- ArrayRef<ValueHandle *> trueCaptures,
- ArrayRef<ValueHandle> trueOperands,
- BlockHandle *falseBranch,
- ArrayRef<ValueHandle *> falseCaptures,
- ArrayRef<ValueHandle> falseOperands);
+OperationHandle cond_br(ValueHandle cond, BlockHandle *trueBranch,
+ ArrayRef<ValueHandle *> trueCaptures,
+ ArrayRef<ValueHandle> trueOperands,
+ BlockHandle *falseBranch,
+ ArrayRef<ValueHandle *> falseCaptures,
+ ArrayRef<ValueHandle> falseOperands);
} // namespace intrinsics
} // namespace edsc
} // namespace mlir
/// Specialization of walk to only visit operations of 'OpTy'.
template <typename OpTy> void walk(std::function<void(OpTy)> callback) {
- walk([&](Operation *inst) {
- if (auto op = inst->dyn_cast<OpTy>())
+ walk([&](Operation *opInst) {
+ if (auto op = opInst->dyn_cast<OpTy>())
callback(op);
});
}
/// Specialization of walkPostOrder to only visit operations of 'OpTy'.
template <typename OpTy>
void walkPostOrder(std::function<void(OpTy)> callback) {
- walkPostOrder([&](Operation *inst) {
- if (auto op = inst->dyn_cast<OpTy>())
+ walkPostOrder([&](Operation *opInst) {
+ if (auto op = opInst->dyn_cast<OpTy>())
callback(op);
});
}
return os;
}
-/// Temporary typedef to Instruction to while the codebase transitions to
-/// Operation.
-using Instruction = Operation;
-
/// This class implements the const/non-const operand iterators for the
/// Operation class in terms of getOperand(idx).
class OperandIterator final
class Block;
class Dialect;
class Operation;
-using Instruction = Operation;
struct OperationState;
class OpAsmParser;
class OpAsmParserResult;
>];
}
-// Class for arithmetic binary instructions.
+// Class for arithmetic binary operations.
class LLVM_ArithmeticOp<string mnemonic, string builderFunc,
list<OpTrait> traits = []> :
LLVM_OneResultOp<mnemonic,
Arguments<(ins LLVM_Type:$lhs, LLVM_Type:$rhs)>,
LLVM_Builder<"$res = builder." # builderFunc # "($lhs, $rhs);">;
-// Integer binary instructions.
+// Integer binary operations.
def LLVM_AddOp : LLVM_ArithmeticOp<"add", "CreateAdd", [Commutative]>;
def LLVM_SubOp : LLVM_ArithmeticOp<"sub", "CreateSub">;
def LLVM_MulOp : LLVM_ArithmeticOp<"mul", "CreateMul", [Commutative]>;
def LLVM_URemOp : LLVM_ArithmeticOp<"urem", "CreateURem">;
def LLVM_SRemOp : LLVM_ArithmeticOp<"srem", "CreateSRem">;
-// Other integer instructions.
+// Other integer operations.
def LLVM_ICmpOp : LLVM_OneResultOp<"icmp", [NoSideEffect]>,
Arguments<(ins I32Attr:$predicate, LLVM_Type:$lhs,
LLVM_Type:$rhs)> {
}];
}
-// Floating point binary instructions.
+// Floating point binary operations.
def LLVM_FAddOp : LLVM_ArithmeticOp<"fadd", "CreateFAdd">;
def LLVM_FSubOp : LLVM_ArithmeticOp<"fsub", "CreateFSub">;
def LLVM_FMulOp : LLVM_ArithmeticOp<"fmul", "CreateFMul">;
def LLVM_FDivOp : LLVM_ArithmeticOp<"fdiv", "CreateFDiv">;
def LLVM_FRemOp : LLVM_ArithmeticOp<"frem", "CreateFRem">;
-// Memory-related instructions.
+// Memory-related operations.
def LLVM_AllocaOp : LLVM_OneResultOp<"alloca">,
Arguments<(ins LLVM_Type:$arraySize)> {
string llvmBuilder = [{
LLVM_Builder<"$res = builder.CreateBitCast($arg, $_resultType);">;
-// Call-related instructions.
+// Call-related operations.
def LLVM_CallOp : LLVM_Op<"call">,
Arguments<(ins OptionalAttr<FunctionAttr>:$callee,
Variadic<LLVM_Type>)>,
- Results<(outs Variadic<LLVM_Type>)>,
+ Results<(outs Variadic<LLVM_Type>)>,
LLVM_TwoBuilders<LLVM_OneResultOpBuilder,
LLVM_ZeroResultOpBuilder> {
let verifier = [{
}];
}
-// Misc instructions.
+// Misc operations.
def LLVM_SelectOp
: LLVM_OneResultOp<"select", [NoSideEffect]>,
Arguments<(ins LLVM_Type:$condition, LLVM_Type:$trueValue,
// =============================================================================
//
// This file defines convenience types for working with standard operations
-// in the MLIR instruction set.
+// in the MLIR operation set.
//
//===----------------------------------------------------------------------===//
namespace detail {
/// A custom binary operation printer that omits the "std." prefix from the
/// operation names.
-void printStandardBinaryOp(Instruction *op, OpAsmPrinter *p);
+void printStandardBinaryOp(Operation *op, OpAsmPrinter *p);
} // namespace detail
class StandardOpsDialect : public Dialect {
MLIRContext *context);
};
-/// The "br" operation represents a branch instruction in a function.
+/// The "br" operation represents a branch operation in a function.
/// The operation takes variable number of operands and produces no results.
/// The operand number and types for each successor must match the
/// arguments of the block successor. For example:
/// Since integers are signless, the predicate also explicitly indicates
/// whether to interpret the operands as signed or unsigned integers for
/// less/greater than comparisons. For the sake of readability by humans,
-/// custom assembly form for the instruction uses a string-typed attribute for
+/// custom assembly form for the operation uses a string-typed attribute for
/// the predicate. The value of this attribute corresponds to lower-cased name
/// of the predicate constant, e.g., "slt" means "signed less than". The string
/// representation of the attribute is merely a syntactic sugar and is converted
Attribute constantFold(ArrayRef<Attribute> operands, MLIRContext *context);
};
-/// The "cond_br" operation represents a conditional branch instruction in a
+/// The "cond_br" operation represents a conditional branch operation in a
/// function. The operation takes variable number of operands and produces
/// no results. The operand number and types for each successor must match the
/// arguments of the block successor. For example:
APFloat getValue() { return getAttrOfType<FloatAttr>("value").getValue(); }
- static bool isClassFor(Instruction *op);
+ static bool isClassFor(Operation *op);
};
/// This is a refinement of the "constant" op for the case where it is
int64_t getValue() { return getAttrOfType<IntegerAttr>("value").getInt(); }
- static bool isClassFor(Instruction *op);
+ static bool isClassFor(Operation *op);
};
/// This is a refinement of the "constant" op for the case where it is
int64_t getValue() { return getAttrOfType<IntegerAttr>("value").getInt(); }
- static bool isClassFor(Instruction *op);
+ static bool isClassFor(Operation *op);
};
/// The "dealloc" operation frees the region of memory referenced by a memref
return getSrcMemRef()->getType().cast<MemRefType>().getRank();
}
// Returns the source memref indices for this DMA operation.
- llvm::iterator_range<Instruction::operand_iterator> getSrcIndices() {
+ llvm::iterator_range<Operation::operand_iterator> getSrcIndices() {
return {getOperation()->operand_begin() + 1,
getOperation()->operand_begin() + 1 + getSrcMemRefRank()};
}
}
// Returns the destination memref indices for this DMA operation.
- llvm::iterator_range<Instruction::operand_iterator> getDstIndices() {
+ llvm::iterator_range<Operation::operand_iterator> getDstIndices() {
return {getOperation()->operand_begin() + 1 + getSrcMemRefRank() + 1,
getOperation()->operand_begin() + 1 + getSrcMemRefRank() + 1 +
getDstMemRefRank()};
}
// Returns the tag memref index for this DMA operation.
- llvm::iterator_range<Instruction::operand_iterator> getTagIndices() {
+ llvm::iterator_range<Operation::operand_iterator> getTagIndices() {
unsigned tagIndexStartPos =
1 + getSrcMemRefRank() + 1 + getDstMemRefRank() + 1 + 1;
return {getOperation()->operand_begin() + tagIndexStartPos,
Value *getTagMemRef() { return getOperand(0); }
// Returns the tag memref index for this DMA operation.
- llvm::iterator_range<Instruction::operand_iterator> getTagIndices() {
+ llvm::iterator_range<Operation::operand_iterator> getTagIndices() {
return {getOperation()->operand_begin() + 1,
getOperation()->operand_begin() + 1 + getTagMemRefRank()};
}
Value *getAggregate() { return getOperand(0); }
- llvm::iterator_range<Instruction::operand_iterator> getIndices() {
+ llvm::iterator_range<Operation::operand_iterator> getIndices() {
return {getOperation()->operand_begin() + 1, getOperation()->operand_end()};
}
return getMemRef()->getType().cast<MemRefType>();
}
- llvm::iterator_range<Instruction::operand_iterator> getIndices() {
+ llvm::iterator_range<Operation::operand_iterator> getIndices() {
return {getOperation()->operand_begin() + 1, getOperation()->operand_end()};
}
bool verify();
};
-/// The "return" operation represents a return instruction within a function.
+/// The "return" operation represents a return operation within a function.
/// The operation takes variable number of operands and produces no results.
/// The operand number and types must match the signature of the function
/// that contains the operation. For example:
/// The "store" op writes an element to a memref specified by an index list.
/// The arity of indices is the rank of the memref (i.e. if the memref being
/// stored to is of rank 3, then 3 indices are required for the store following
-/// the memref identifier). The store instruction does not produce a result.
+/// the memref identifier). The store operation does not produce a result.
///
/// In the following example, the ssa value '%v' is stored in memref '%A' at
/// indices [%i, %j]:
return getMemRef()->getType().cast<MemRefType>();
}
- llvm::iterator_range<Instruction::operand_iterator> getIndices() {
+ llvm::iterator_range<Operation::operand_iterator> getIndices() {
return {getOperation()->operand_begin() + 2, getOperation()->operand_end()};
}
};
/// Prints dimension and symbol list.
-void printDimAndSymbolList(Instruction::operand_iterator begin,
- Instruction::operand_iterator end, unsigned numDims,
+void printDimAndSymbolList(Operation::operand_iterator begin,
+ Operation::operand_iterator end, unsigned numDims,
OpAsmPrinter *p);
/// Parses dimension and symbol list and returns true if parsing failed.
MemRefType getMemRefType() {
return getMemRef()->getType().cast<MemRefType>();
}
- llvm::iterator_range<Instruction::operand_iterator> getIndices();
+ llvm::iterator_range<Operation::operand_iterator> getIndices();
Optional<Value *> getPaddingValue();
AffineMap getPermutationMap();
MemRefType getMemRefType() {
return getMemRef()->getType().cast<MemRefType>();
}
- llvm::iterator_range<Instruction::operand_iterator> getIndices();
+ llvm::iterator_range<Operation::operand_iterator> getIndices();
AffineMap getPermutationMap();
static bool parse(OpAsmParser *parser, OperationState *result);
return false;
if (auto *op = value->getDefiningOp()) {
- // Top level instruction or constant operation is ok.
+ // Top level operation or constant operation is ok.
if (op->getParentOp() == nullptr || op->isa<ConstantOp>())
return true;
// Affine apply operation is ok if all of its operands are ok.
return false;
if (auto *op = value->getDefiningOp()) {
- // Top level instruction or constant operation is ok.
+ // Top level operation or constant operation is ok.
if (op->getParentOp() == nullptr || op->isa<ConstantOp>())
return true;
// Affine apply operation is ok if all of its operands are ok.
/// errors.
template <typename OpTy>
static bool verifyDimAndSymbolIdentifiers(OpTy &op,
- Instruction::operand_range operands,
+ Operation::operand_range operands,
unsigned numDims) {
unsigned opIt = 0;
for (auto *operand : operands) {
? t->getDefiningOp()->dyn_cast<AffineApplyOp>()
: AffineApplyOp();
if (affineApply) {
- // a. Compose affine.apply instructions.
+ // a. Compose affine.apply operations.
LLVM_DEBUG(affineApply.getOperation()->print(
dbgs() << "\nCompose AffineApplyOp recursively: "));
AffineMap affineApplyMap = affineApply.getAffineMap();
SimplifyAffineApply(MLIRContext *context)
: RewritePattern(AffineApplyOp::getOperationName(), 1, context) {}
- PatternMatchResult matchAndRewrite(Instruction *op,
+ PatternMatchResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {
auto apply = op->cast<AffineApplyOp>();
auto map = apply.getAffineMap();
AffineForLoopBoundFolder(MLIRContext *context)
: RewritePattern(AffineForOp::getOperationName(), 1, context) {}
- PatternMatchResult matchAndRewrite(Instruction *op,
+ PatternMatchResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {
auto forOp = op->cast<AffineForOp>();
auto foldLowerOrUpperBound = [&forOp](bool lower) {
return false;
}
-bool OpTrait::impl::verifyCompatibleOperandBroadcast(Instruction *op) {
+bool OpTrait::impl::verifyCompatibleOperandBroadcast(Operation *op) {
assert(op->getNumOperands() == 2 &&
"only support broadcast check on two operands");
assert(op->getNumResults() == 1 &&
mlir::edsc::ValueHandle::createComposedAffineApply(AffineMap map,
ArrayRef<Value *> operands) {
assert(ScopedContext::getBuilder() && "Unexpected null builder");
- Instruction *inst =
+ Operation *op =
makeComposedAffineApply(ScopedContext::getBuilder(),
ScopedContext::getLocation(), map, operands)
.getOperation();
- assert(inst->getNumResults() == 1 && "Not a single result AffineApply");
- return ValueHandle(inst->getResult(0));
+ assert(op->getNumResults() == 1 && "Not a single result AffineApply");
+ return ValueHandle(op->getResult(0));
}
ValueHandle ValueHandle::create(StringRef name, ArrayRef<ValueHandle> operands,
ArrayRef<Type> resultTypes,
ArrayRef<NamedAttribute> attributes) {
- Instruction *inst =
- InstructionHandle::create(name, operands, resultTypes, attributes);
- if (inst->getNumResults() == 1) {
- return ValueHandle(inst->getResult(0));
+ Operation *op =
+ OperationHandle::create(name, operands, resultTypes, attributes);
+ if (op->getNumResults() == 1) {
+ return ValueHandle(op->getResult(0));
}
- if (auto f = inst->dyn_cast<AffineForOp>()) {
+ if (auto f = op->dyn_cast<AffineForOp>()) {
return ValueHandle(f.getInductionVar());
}
- llvm_unreachable("unsupported instruction, use an InstructionHandle instead");
+ llvm_unreachable("unsupported operation, use an OperationHandle instead");
}
-InstructionHandle
-InstructionHandle::create(StringRef name, ArrayRef<ValueHandle> operands,
- ArrayRef<Type> resultTypes,
- ArrayRef<NamedAttribute> attributes) {
+OperationHandle OperationHandle::create(StringRef name,
+ ArrayRef<ValueHandle> operands,
+ ArrayRef<Type> resultTypes,
+ ArrayRef<NamedAttribute> attributes) {
OperationState state(ScopedContext::getContext(),
ScopedContext::getLocation(), name);
SmallVector<Value *, 4> ops(operands.begin(), operands.end());
for (const auto &attr : attributes) {
state.addAttribute(attr.first, attr.second);
}
- return InstructionHandle(ScopedContext::getBuilder()->createOperation(state));
+ return OperationHandle(ScopedContext::getBuilder()->createOperation(state));
}
BlockHandle mlir::edsc::BlockHandle::create(ArrayRef<Type> argTypes) {
unsigned &numSymbols) {
AffineExpr d;
Value *resultVal = nullptr;
- auto *inst = val->getDefiningOp();
- auto constant = inst ? inst->dyn_cast<ConstantIndexOp>() : ConstantIndexOp();
+ auto *op = val->getDefiningOp();
+ auto constant = op ? op->dyn_cast<ConstantIndexOp>() : ConstantIndexOp();
if (constant) {
d = getAffineConstantExpr(constant.getValue(), context);
} else if (isValidSymbol(val) && !isValidDim(val)) {
using namespace mlir;
using namespace mlir::edsc;
-InstructionHandle mlir::edsc::intrinsics::br(BlockHandle bh,
- ArrayRef<ValueHandle> operands) {
+OperationHandle mlir::edsc::intrinsics::br(BlockHandle bh,
+ ArrayRef<ValueHandle> operands) {
assert(bh && "Expected already captured BlockHandle");
for (auto &o : operands) {
(void)o;
assert(o && "Expected already captured ValueHandle");
}
SmallVector<Value *, 4> ops(operands.begin(), operands.end());
- return InstructionHandle::create<BranchOp>(bh.getBlock(), ops);
+ return OperationHandle::create<BranchOp>(bh.getBlock(), ops);
}
static void enforceEmptyCapturesMatchOperands(ArrayRef<ValueHandle *> captures,
ArrayRef<ValueHandle> operands) {
}
}
-InstructionHandle mlir::edsc::intrinsics::br(BlockHandle *bh,
- ArrayRef<ValueHandle *> captures,
- ArrayRef<ValueHandle> operands) {
+OperationHandle mlir::edsc::intrinsics::br(BlockHandle *bh,
+ ArrayRef<ValueHandle *> captures,
+ ArrayRef<ValueHandle> operands) {
assert(!*bh && "Unexpected already captured BlockHandle");
enforceEmptyCapturesMatchOperands(captures, operands);
BlockBuilder(bh, captures)({/* no body */});
SmallVector<Value *, 4> ops(operands.begin(), operands.end());
- return InstructionHandle::create<BranchOp>(bh->getBlock(), ops);
+ return OperationHandle::create<BranchOp>(bh->getBlock(), ops);
}
-InstructionHandle
+OperationHandle
mlir::edsc::intrinsics::cond_br(ValueHandle cond, BlockHandle trueBranch,
ArrayRef<ValueHandle> trueOperands,
BlockHandle falseBranch,
ArrayRef<ValueHandle> falseOperands) {
SmallVector<Value *, 4> trueOps(trueOperands.begin(), trueOperands.end());
SmallVector<Value *, 4> falseOps(falseOperands.begin(), falseOperands.end());
- return InstructionHandle::create<CondBranchOp>(
+ return OperationHandle::create<CondBranchOp>(
cond, trueBranch.getBlock(), trueOps, falseBranch.getBlock(), falseOps);
}
-InstructionHandle mlir::edsc::intrinsics::cond_br(
+OperationHandle mlir::edsc::intrinsics::cond_br(
ValueHandle cond, BlockHandle *trueBranch,
ArrayRef<ValueHandle *> trueCaptures, ArrayRef<ValueHandle> trueOperands,
BlockHandle *falseBranch, ArrayRef<ValueHandle *> falseCaptures,
BlockBuilder(falseBranch, falseCaptures)({/* no body */});
SmallVector<Value *, 4> trueOps(trueOperands.begin(), trueOperands.end());
SmallVector<Value *, 4> falseOps(falseOperands.begin(), falseOperands.end());
- return InstructionHandle::create<CondBranchOp>(
+ return OperationHandle::create<CondBranchOp>(
cond, trueBranch->getBlock(), trueOps, falseBranch->getBlock(), falseOps);
}
#include "mlir/EDSC/reference-impl.inc"
void LowerEDSCTestPass::runOnFunction() {
- getFunction().walk([](Instruction *op) {
+ getFunction().walk([](Operation *op) {
if (op->getName().getStringRef() == "print") {
auto opName = op->getAttrOfType<StringAttr>("op");
if (!opName) {
using namespace mlir::edsc::detail;
static void printDefininingStatement(llvm::raw_ostream &os, Value &v) {
- auto *inst = v.getDefiningOp();
- if (inst) {
- inst->print(os);
+ auto *op = v.getDefiningOp();
+ if (op) {
+ op->print(os);
return;
}
if (auto forInst = getForInductionVarOwner(&v)) {
for (Value *v : values) {
auto *def = v->getDefiningOp();
(void)def;
- // There may be no defining instruction if the value is a function
+ // There may be no defining operation if the value is a function
// argument. We accept such values.
assert((!def || def->isa<ConstantIndexOp>() || def->isa<AffineApplyOp>() ||
def->isa<AffineForOp>() || def->isa<DimOp>()) &&
bool expectedEmpty = false;
if (e.isa<UnaryExpr>() || e.isa<BinaryExpr>() || e.isa<TernaryExpr>() ||
e.isa<VariadicExpr>()) {
- // Emit any successors before the instruction with successors. At this
+ // Emit any successors before the operation with successors. At this
// point, all values defined by the current block must have been bound, the
- // current instruction with successors cannot define new values, so the
+ // current operation with successors cannot define new values, so the
// successor can use those values.
assert(e.getSuccessors().empty() || e.getResultTypes().empty() &&
"an operation with successors must "
buildExprs(successorArgs[i], b, ssaBindings, blockBindings));
}
- Instruction *inst = b.createOperation(state);
- return llvm::to_vector<4>(inst->getResults());
+ Operation *op = b.createOperation(state);
+ return llvm::to_vector<4>(op->getResults());
}
static AffineExpr createOperandAffineExpr(Expr e, int64_t position,
void recordTypeReference(Type ty) { usedTypes.insert(ty); }
// Visit functions.
- void visitOperation(Operation *inst);
+ void visitOperation(Operation *op);
void visitType(Type type);
void visitAttribute(Attribute attr);
};
} // end anonymous namespace
-// TODO Support visiting other types/instructions when implemented.
+// TODO Support visiting other types/operations when implemented.
void ModuleState::visitType(Type type) {
recordTypeReference(type);
if (auto funcType = type.dyn_cast<FunctionType>()) {
}
}
-void ModuleState::visitOperation(Operation *inst) {
+void ModuleState::visitOperation(Operation *op) {
// Visit all the types used in the operation.
- for (auto *operand : inst->getOperands())
+ for (auto *operand : op->getOperands())
visitType(operand->getType());
- for (auto *result : inst->getResults())
+ for (auto *result : op->getResults())
visitType(result->getType());
// Visit each of the attributes.
- for (auto elt : inst->getAttrs())
+ for (auto elt : op->getAttrs())
visitAttribute(elt.second);
}
// Print the function signature.
void printFunctionSignature();
- // Methods to print instructions.
- void print(Operation *inst);
+ // Methods to print operations.
+ void print(Operation *op);
void print(Block *block, bool printBlockArgs = true,
bool printBlockTerminator = true);
os.indent(currentIndent) << "}";
}
- // Number of spaces used for indenting nested instructions.
+ // Number of spaces used for indenting nested operations.
const static unsigned indentWidth = 2;
protected:
/// continuously throughout regions. In particular, we traverse the regions
/// held by operations and number values in depth-first pre-order.
void FunctionPrinter::numberValuesInBlock(Block &block) {
- // Each block gets a unique ID, and all of the instructions within it get
+ // Each block gets a unique ID, and all of the operations within it get
// numbered as well.
blockIDs[&block] = nextBlockID++;
for (auto *arg : block.getArguments())
numberValueID(arg);
- for (auto &inst : block) {
- // We number instruction that have results, and we only number the first
+ for (auto &op : block) {
+ // We number operations that have results, and we only number the first
// result.
- if (inst.getNumResults() != 0)
- numberValueID(inst.getResult(0));
- for (auto ®ion : inst.getRegions())
+ if (op.getNumResults() != 0)
+ numberValueID(op.getResult(0));
+ for (auto ®ion : op.getRegions())
for (auto &block : region)
numberValuesInBlock(block);
}
auto range = llvm::make_range(
block->getOperations().begin(),
std::prev(block->getOperations().end(), printBlockTerminator ? 0 : 1));
- for (auto &inst : range) {
- print(&inst);
+ for (auto &op : range) {
+ print(&op);
os << '\n';
}
currentIndent -= indentWidth;
}
-void FunctionPrinter::print(Operation *inst) {
+void FunctionPrinter::print(Operation *op) {
os.indent(currentIndent);
- printOperation(inst);
- printTrailingLocation(inst->getLoc());
+ printOperation(op);
+ printTrailingLocation(op->getLoc());
}
void FunctionPrinter::printValueID(Value *value, bool printResultNo) const {
int resultNo = -1;
auto lookupValue = value;
- // If this is a reference to the result of a multi-result instruction or
- // instruction, print out the # identifier and make sure to map our lookup
- // to the first result of the instruction.
+ // If this is a reference to the result of a multi-result operation or
+ // operation, print out the # identifier and make sure to map our lookup
+ // to the first result of the operation.
if (auto *result = dyn_cast<OpResult>(value)) {
if (result->getOwner()->getNumResults() != 1) {
resultNo = result->getResultNumber();
/// Create a new Operation from operation state.
Operation *Operation::create(const OperationState &state) {
unsigned numRegions = state.regions.size();
- Operation *inst = create(
- state.location, state.name, state.operands, state.types, state.attributes,
- state.successors, numRegions, state.resizableOperandList, state.context);
+ Operation *op = create(state.location, state.name, state.operands,
+ state.types, state.attributes, state.successors,
+ numRegions, state.resizableOperandList, state.context);
for (unsigned i = 0; i < numRegions; ++i)
if (state.regions[i])
- inst->getRegion(i).takeBody(*state.regions[i]);
- return inst;
+ op->getRegion(i).takeBody(*state.regions[i]);
+ return op;
}
/// Overload of create that takes an existing NamedAttributeList to avoid
dialect(dialect) {}
// Match by type.
- PatternMatchResult match(Instruction *op) const override {
+ PatternMatchResult match(Operation *op) const override {
if (op->isa<SourceOp>())
return this->matchSuccess();
return this->matchFailure();
// Convert the type of the result to an LLVM type, pass operands as is,
// preserve attributes.
- SmallVector<Value *, 4> rewrite(Instruction *op, ArrayRef<Value *> operands,
+ SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
unsigned numResults = op->getNumResults();
auto *mlirContext = op->getContext();
struct AllocOpLowering : public LLVMLegalizationPattern<AllocOp> {
using LLVMLegalizationPattern<AllocOp>::LLVMLegalizationPattern;
- PatternMatchResult match(Instruction *op) const override {
+ PatternMatchResult match(Operation *op) const override {
if (!LLVMLegalizationPattern<AllocOp>::match(op))
return matchFailure();
auto allocOp = op->cast<AllocOp>();
return isSupportedMemRefType(type) ? matchSuccess() : matchFailure();
}
- SmallVector<Value *, 4> rewrite(Instruction *op, ArrayRef<Value *> operands,
+ SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
auto allocOp = op->cast<AllocOp>();
MemRefType type = allocOp.getType();
struct DeallocOpLowering : public LLVMLegalizationPattern<DeallocOp> {
using LLVMLegalizationPattern<DeallocOp>::LLVMLegalizationPattern;
- SmallVector<Value *, 4> rewrite(Instruction *op, ArrayRef<Value *> operands,
+ SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
assert(operands.size() == 1 && "dealloc takes one operand");
struct MemRefCastOpLowering : public LLVMLegalizationPattern<MemRefCastOp> {
using LLVMLegalizationPattern<MemRefCastOp>::LLVMLegalizationPattern;
- PatternMatchResult match(Instruction *op) const override {
+ PatternMatchResult match(Operation *op) const override {
if (!LLVMLegalizationPattern<MemRefCastOp>::match(op))
return matchFailure();
auto memRefCastOp = op->cast<MemRefCastOp>();
: matchFailure();
}
- SmallVector<Value *, 4> rewrite(Instruction *op, ArrayRef<Value *> operands,
+ SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
auto memRefCastOp = op->cast<MemRefCastOp>();
auto targetType = memRefCastOp.getType();
struct DimOpLowering : public LLVMLegalizationPattern<DimOp> {
using LLVMLegalizationPattern<DimOp>::LLVMLegalizationPattern;
- PatternMatchResult match(Instruction *op) const override {
+ PatternMatchResult match(Operation *op) const override {
if (!LLVMLegalizationPattern<DimOp>::match(op))
return this->matchFailure();
auto dimOp = op->cast<DimOp>();
return isSupportedMemRefType(type) ? matchSuccess() : matchFailure();
}
- SmallVector<Value *, 4> rewrite(Instruction *op, ArrayRef<Value *> operands,
+ SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
assert(operands.size() == 1 && "expected exactly one operand");
auto dimOp = op->cast<DimOp>();
using LLVMLegalizationPattern<Derived>::LLVMLegalizationPattern;
using Base = LoadStoreOpLowering<Derived>;
- PatternMatchResult match(Instruction *op) const override {
+ PatternMatchResult match(Operation *op) const override {
if (!LLVMLegalizationPattern<Derived>::match(op))
return this->matchFailure();
auto loadOp = op->cast<Derived>();
struct LoadOpLowering : public LoadStoreOpLowering<LoadOp> {
using Base::Base;
- SmallVector<Value *, 4> rewrite(Instruction *op, ArrayRef<Value *> operands,
+ SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
auto loadOp = op->cast<LoadOp>();
auto type = loadOp.getMemRefType();
struct StoreOpLowering : public LoadStoreOpLowering<StoreOp> {
using Base::Base;
- SmallVector<Value *, 4> rewrite(Instruction *op, ArrayRef<Value *> operands,
+ SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
auto storeOp = op->cast<StoreOp>();
auto type = storeOp.getMemRefType();
using LLVMLegalizationPattern<SourceOp>::LLVMLegalizationPattern;
using Super = OneToOneLLVMTerminatorLowering<SourceOp, TargetOp>;
- void rewriteTerminator(Instruction *op, ArrayRef<Value *> properOperands,
+ void rewriteTerminator(Operation *op, ArrayRef<Value *> properOperands,
ArrayRef<Block *> destinations,
ArrayRef<ArrayRef<Value *>> operands,
FuncBuilder &rewriter) const override {
struct ReturnOpLowering : public LLVMLegalizationPattern<ReturnOp> {
using LLVMLegalizationPattern<ReturnOp>::LLVMLegalizationPattern;
- SmallVector<Value *, 4> rewrite(Instruction *op, ArrayRef<Value *> operands,
+ SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
unsigned numArguments = op->getNumOperands();
// Operations
ParseResult parseOperation();
- Instruction *parseGenericOperation();
- Instruction *parseCustomOperation();
+ Operation *parseGenericOperation();
+ Operation *parseCustomOperation();
- ParseResult parseInstructions(Block *block);
+ ParseResult parseOperations(Block *block);
private:
Function *function;
/// Block declaration.
///
-/// block ::= block-label? instruction* terminator-inst
+/// block ::= block-label? operation* terminator-op
/// block-label ::= block-id block-arg-list? `:`
/// block-id ::= caret-id
/// block-arg-list ::= `(` ssa-id-and-type-list? `)`
/// Create and remember a new placeholder for a forward reference.
Value *FunctionParser::createForwardReferencePlaceholder(SMLoc loc, Type type) {
- // Forward references are always created as instructions, even in ML
+ // Forward references are always created as operations, even in ML
// functions, because we just need something with a def/use chain.
//
// We create these placeholders as having an empty name, which we know
// cannot be created through normal user input, allowing us to distinguish
// them.
auto name = OperationName("placeholder", getContext());
- auto *inst = Instruction::create(
+ auto *op = Operation::create(
getEncodedSourceLocation(loc), name, /*operands=*/{}, type,
/*attributes=*/llvm::None, /*successors=*/{}, /*numRegions=*/0,
/*resizableOperandList=*/false, getContext());
- forwardReferencePlaceholders[inst->getResult(0)] = loc;
- return inst->getResult(0);
+ forwardReferencePlaceholders[op->getResult(0)] = loc;
+ return op->getResult(0);
}
/// Given an unbound reference to an SSA value and its type, return the value
FunctionParser::~FunctionParser() {
for (auto &fwd : forwardReferencePlaceholders) {
// Drop all uses of undefined forward declared reference and destroy
- // defining instruction.
+ // defining operation.
fwd.first->dropAllUses();
fwd.first->getDefiningOp()->destroy();
}
return ParseSuccess;
}
-/// Parse a SSA operand for an instruction or instruction.
+/// Parse an SSA operand for an operation.
///
/// ssa-use ::= ssa-id
///
return ParseFailure;
}
- Instruction *op;
+ Operation *op;
if (getToken().is(Token::bare_identifier) || getToken().isKeyword())
op = parseCustomOperation();
else if (getToken().is(Token::string))
if (!op)
return ParseFailure;
- // If the instruction had a name, register it.
+ // If the operation had a name, register it.
if (!resultID.empty()) {
if (op->getNumResults() == 0)
return emitError(loc, "cannot name an operation with no results");
};
} // namespace
-Instruction *FunctionParser::parseGenericOperation() {
+Operation *FunctionParser::parseGenericOperation() {
// Get location information for the operation.
auto srcLocation = getEncodedSourceLocation(getToken().getLoc());
return nullptr;
}
- if (parseToken(Token::colon, "expected ':' followed by instruction type"))
+ if (parseToken(Token::colon, "expected ':' followed by operation type"))
return nullptr;
auto typeLoc = getToken().getLoc();
};
} // end anonymous namespace.
-Instruction *FunctionParser::parseCustomOperation() {
+Operation *FunctionParser::parseCustomOperation() {
auto opLoc = getToken().getLoc();
auto opName = getTokenSpelling();
CustomOpAsmParser opAsmParser(opLoc, opName, *this);
/// A custom binary operation printer that omits the "std." prefix from the
/// operation names.
-void detail::printStandardBinaryOp(Instruction *op, OpAsmPrinter *p) {
+void detail::printStandardBinaryOp(Operation *op, OpAsmPrinter *p) {
assert(op->getNumOperands() == 2 && "binary op should have two operands");
assert(op->getNumResults() == 1 && "binary op should have one result");
>();
}
-void mlir::printDimAndSymbolList(Instruction::operand_iterator begin,
- Instruction::operand_iterator end,
+void mlir::printDimAndSymbolList(Operation::operand_iterator begin,
+ Operation::operand_iterator end,
unsigned numDims, OpAsmPrinter *p) {
*p << '(';
p->printOperands(begin, begin + numDims);
MemRefCastFolder(StringRef rootOpName, MLIRContext *context)
: RewritePattern(rootOpName, 1, context) {}
- PatternMatchResult match(Instruction *op) const override {
+ PatternMatchResult match(Operation *op) const override {
for (auto *operand : op->getOperands())
if (matchPattern(operand, m_Op<MemRefCastOp>()))
return matchSuccess();
return matchFailure();
}
- void rewrite(Instruction *op, PatternRewriter &rewriter) const override {
+ void rewrite(Operation *op, PatternRewriter &rewriter) const override {
for (unsigned i = 0, e = op->getNumOperands(); i != e; ++i)
if (auto *memref = op->getOperand(i)->getDefiningOp())
if (auto cast = memref->dyn_cast<MemRefCastOp>())
}
namespace {
-/// Fold constant dimensions into an alloc instruction.
+/// Fold constant dimensions into an alloc operation.
struct SimplifyAllocConst : public RewritePattern {
SimplifyAllocConst(MLIRContext *context)
: RewritePattern(AllocOp::getOperationName(), 1, context) {}
- PatternMatchResult match(Instruction *op) const override {
+ PatternMatchResult match(Operation *op) const override {
auto alloc = op->cast<AllocOp>();
// Check to see if any dimensions operands are constants. If so, we can
return matchFailure();
}
- void rewrite(Instruction *op, PatternRewriter &rewriter) const override {
+ void rewrite(Operation *op, PatternRewriter &rewriter) const override {
auto allocOp = op->cast<AllocOp>();
auto memrefType = allocOp.getType();
}
};
-/// Fold alloc instructions with no uses. Alloc has side effects on the heap,
+/// Fold alloc operations with no uses. Alloc has side effects on the heap,
/// but can still be deleted if it has zero uses.
struct SimplifyDeadAlloc : public RewritePattern {
SimplifyDeadAlloc(MLIRContext *context)
: RewritePattern(AllocOp::getOperationName(), 1, context) {}
- PatternMatchResult matchAndRewrite(Instruction *op,
+ PatternMatchResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {
// Check if the alloc'ed value has any uses.
auto alloc = op->cast<AllocOp>();
SimplifyIndirectCallWithKnownCallee(MLIRContext *context)
: RewritePattern(CallIndirectOp::getOperationName(), 1, context) {}
- PatternMatchResult matchAndRewrite(Instruction *op,
+ PatternMatchResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {
auto indirectCall = op->cast<CallIndirectOp>();
SimplifyConstCondBranchPred(MLIRContext *context)
: RewritePattern(CondBranchOp::getOperationName(), 1, context) {}
- PatternMatchResult matchAndRewrite(Instruction *op,
+ PatternMatchResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {
auto condbr = op->cast<CondBranchOp>();
ConstantOp::build(builder, result, type, builder->getFloatAttr(type, value));
}
-bool ConstantFloatOp::isClassFor(Instruction *op) {
+bool ConstantFloatOp::isClassFor(Operation *op) {
return ConstantOp::isClassFor(op) &&
op->getResult(0)->getType().isa<FloatType>();
}
/// ConstantIntOp only matches values whose result type is an IntegerType.
-bool ConstantIntOp::isClassFor(Instruction *op) {
+bool ConstantIntOp::isClassFor(Operation *op) {
return ConstantOp::isClassFor(op) &&
op->getResult(0)->getType().isa<IntegerType>();
}
}
/// ConstantIndexOp only matches values whose result type is Index.
-bool ConstantIndexOp::isClassFor(Instruction *op) {
+bool ConstantIndexOp::isClassFor(Operation *op) {
return ConstantOp::isClassFor(op) && op->getResult(0)->getType().isIndex();
}
// DeallocOp
//===----------------------------------------------------------------------===//
namespace {
-/// Fold Dealloc instructions that are deallocating an AllocOp that is only used
+/// Fold Dealloc operations that are deallocating an AllocOp that is only used
/// by other Dealloc operations.
struct SimplifyDeadDealloc : public RewritePattern {
SimplifyDeadDealloc(MLIRContext *context)
: RewritePattern(DeallocOp::getOperationName(), 1, context) {}
- PatternMatchResult matchAndRewrite(Instruction *op,
+ PatternMatchResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {
auto dealloc = op->cast<DeallocOp>();
- // Check that the memref operand's defining instruction is an AllocOp.
+ // Check that the memref operand's defining operation is an AllocOp.
Value *memref = dealloc.getMemRef();
- Instruction *defOp = memref->getDefiningOp();
+ Operation *defOp = memref->getDefiningOp();
if (!defOp || !defOp->isa<AllocOp>())
return matchFailure();
SimplifyXMinusX(MLIRContext *context)
: RewritePattern(SubIOp::getOperationName(), 1, context) {}
- PatternMatchResult matchAndRewrite(Instruction *op,
+ PatternMatchResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {
auto subi = op->cast<SubIOp>();
if (subi.getOperand(0) != subi.getOperand(1))
result->addTypes(vectorType);
}
-llvm::iterator_range<Instruction::operand_iterator>
+llvm::iterator_range<Operation::operand_iterator>
VectorTransferReadOp::getIndices() {
auto begin = getOperation()->operand_begin() + Offsets::FirstIndexOffset;
auto end = begin + getMemRefType().getRank();
builder->getAffineMapAttr(permutationMap));
}
-llvm::iterator_range<Instruction::operand_iterator>
+llvm::iterator_range<Operation::operand_iterator>
VectorTransferWriteOp::getIndices() {
auto begin = getOperation()->operand_begin() + Offsets::FirstIndexOffset;
auto end = begin + getMemRefType().getRank();
// Implementation class for module translation. Holds a reference to the module
// being translated, and the mappings between the original and the translated
// functions, basic blocks and values. It is practically easier to hold these
-// mappings in one class since the conversion of control flow instructions
-// needs to look up block and function mappins.
+// mappings in one class since the conversion of control flow operations
+// needs to look up block and function mappings.
class ModuleTranslation {
public:
// Translate the given MLIR module expressed in MLIR LLVM IR dialect into an
bool convertOneFunction(Function &func);
void connectPHINodes(Function &func);
bool convertBlock(Block &bb, bool ignoreArguments);
- bool convertInstruction(Instruction &inst, llvm::IRBuilder<> &builder);
+ bool convertOperation(Operation &op, llvm::IRBuilder<> &builder);
template <typename Range>
SmallVector<llvm::Value *, 8> lookupValues(Range &&values);
return remapped;
}
-// Given a single MLIR instruction, create the corresponding LLVM IR instruction
+// Given a single MLIR operation, create the corresponding LLVM IR instruction
// using the `builder`. LLVM IR Builder does not have a generic interface so
// this has to be a long chain of `if`s calling different functions with a
// different number of arguments.
-bool ModuleTranslation::convertInstruction(Instruction &inst,
- llvm::IRBuilder<> &builder) {
+bool ModuleTranslation::convertOperation(Operation &opInst,
+ llvm::IRBuilder<> &builder) {
auto extractPosition = [](ArrayAttr attr) {
SmallVector<unsigned, 4> position;
position.reserve(attr.size());
// itself. Otherwise, this is an indirect call and the callee is the first
// operand, look it up as a normal value. Return the llvm::Value representing
// the function result, which may be of llvm::VoidTy type.
- auto convertCall = [this, &builder](Instruction &inst) -> llvm::Value * {
- auto operands = lookupValues(inst.getOperands());
+ auto convertCall = [this, &builder](Operation &op) -> llvm::Value * {
+ auto operands = lookupValues(op.getOperands());
ArrayRef<llvm::Value *> operandsRef(operands);
- if (auto attr = inst.getAttrOfType<FunctionAttr>("callee")) {
+ if (auto attr = op.getAttrOfType<FunctionAttr>("callee")) {
return builder.CreateCall(functionMapping.lookup(attr.getValue()),
operandsRef);
} else {
// Emit calls. If the called function has a result, remap the corresponding
// value. Note that LLVM IR dialect CallOp has either 0 or 1 result.
- if (auto op = inst.dyn_cast<LLVM::CallOp>()) {
- llvm::Value *result = convertCall(inst);
- if (inst.getNumResults() != 0) {
- valueMapping[inst.getResult(0)] = result;
+ if (opInst.isa<LLVM::CallOp>()) {
+ llvm::Value *result = convertCall(opInst);
+ if (opInst.getNumResults() != 0) {
+ valueMapping[opInst.getResult(0)] = result;
return false;
}
// Check that LLVM call returns void for 0-result functions.
// Emit branches. We need to look up the remapped blocks and ignore the block
// arguments that were transformed into PHI nodes.
- if (auto op = inst.dyn_cast<LLVM::BrOp>()) {
- builder.CreateBr(blockMapping[op.getSuccessor(0)]);
+ if (auto brOp = opInst.dyn_cast<LLVM::BrOp>()) {
+ builder.CreateBr(blockMapping[brOp.getSuccessor(0)]);
return false;
}
- if (auto op = inst.dyn_cast<LLVM::CondBrOp>()) {
- builder.CreateCondBr(valueMapping.lookup(op.getOperand(0)),
- blockMapping[op.getSuccessor(0)],
- blockMapping[op.getSuccessor(1)]);
+ if (auto condbrOp = opInst.dyn_cast<LLVM::CondBrOp>()) {
+ builder.CreateCondBr(valueMapping.lookup(condbrOp.getOperand(0)),
+ blockMapping[condbrOp.getSuccessor(0)],
+ blockMapping[condbrOp.getSuccessor(1)]);
return false;
}
- inst.emitError("unsupported or non-LLVM operation: " +
- inst.getName().getStringRef());
+ opInst.emitError("unsupported or non-LLVM operation: " +
+ opInst.getName().getStringRef());
return true;
}
bool ModuleTranslation::convertBlock(Block &bb, bool ignoreArguments) {
llvm::IRBuilder<> builder(blockMapping[&bb]);
- // Before traversing instructions, make block arguments available through
+ // Before traversing operations, make block arguments available through
// value remapping and PHI nodes, but do not add incoming edges for the PHI
// nodes just yet: those values may be defined by this or following blocks.
// This step is omitted if "ignoreArguments" is set. The arguments of the
}
}
- // Traverse instructions.
- for (auto &inst : bb) {
- if (convertInstruction(inst, builder))
+ // Traverse operations.
+ for (auto &op : bb) {
+ if (convertOperation(op, builder))
return true;
}
return false;
}
-// Get the SSA value passed to the current block from the terminator instruction
+// Get the SSA value passed to the current block from the terminator operation
// of its predecessor.
static Value *getPHISourceValue(Block *current, Block *pred,
unsigned numArguments, unsigned index) {
// through the "true" or the "false" branch and take the relevant operands.
auto condBranchOp = terminator.dyn_cast<LLVM::CondBrOp>();
assert(condBranchOp &&
- "only branch instructions can be terminators of a block that "
+ "only branch operations can be terminators of a block that "
"has successors");
assert((condBranchOp.getSuccessor(0) != condBranchOp.getSuccessor(1)) &&
"successors with arguments in LLVM conditional branches must be "
// assigning a block won't work well with branches, update the body instead.
b1.set({r = arg1 + arg2, edsc::Branch(b2, {arg1, r})});
b2.set({edsc::Branch(b1, {arg3, arg4})});
- auto instr = edsc::Branch(b2, {c1, c2});
+ auto op = edsc::Branch(b2, {c1, c2});
// Emit a branch to b2. This should also emit blocks b2 and b1 that appear as
- // successors to the current block after the branch instruction is insterted.
- edsc::MLIREmitter(&builder, f->getLoc()).emitStmt(instr);
+ // successors to the current block after the branch operation is inserted.
+ edsc::MLIREmitter(&builder, f->getLoc()).emitStmt(op);
// clang-format off
// CHECK-LABEL: @blocks
}
// Inject two EDSC-constructed blocks with arguments and a conditional branch
-// instruction that transfers control to these blocks.
+// operation that transfers control to these blocks.
TEST_FUNC(cond_branch) {
auto f =
makeFunction("cond_branch", {}, {IntegerType::get(1, &globalContext())});
auto f = makeFunction("custom_ops", {}, {indexType, indexType});
ScopedContext scope(f.get());
- CustomInstruction<ValueHandle> MY_CUSTOM_OP("my_custom_op");
- CustomInstruction<InstructionHandle> MY_CUSTOM_INST_0("my_custom_inst_0");
- CustomInstruction<InstructionHandle> MY_CUSTOM_INST_2("my_custom_inst_2");
+ CustomOperation<ValueHandle> MY_CUSTOM_OP("my_custom_op");
+ CustomOperation<OperationHandle> MY_CUSTOM_OP_0("my_custom_op_0");
+ CustomOperation<OperationHandle> MY_CUSTOM_OP_2("my_custom_op_2");
// clang-format off
ValueHandle vh(indexType), vh20(indexType), vh21(indexType);
- InstructionHandle ih0, ih2;
+ OperationHandle ih0, ih2;
IndexHandle m, n, M(f->getArgument(0)), N(f->getArgument(1));
IndexHandle ten(index_t(10)), twenty(index_t(20));
LoopNestBuilder({&m, &n}, {M, N}, {M + ten, N + twenty}, {1, 1})({
vh = MY_CUSTOM_OP({m, m + n}, {indexType}, {}),
- ih0 = MY_CUSTOM_INST_0({m, m + n}, {}),
- ih2 = MY_CUSTOM_INST_2({m, m + n}, {indexType, indexType}),
+ ih0 = MY_CUSTOM_OP_0({m, m + n}, {}),
+ ih2 = MY_CUSTOM_OP_2({m, m + n}, {indexType, indexType}),
// These captures are verbose for now, can improve when used in practice.
vh20 = ValueHandle(ih2.getOperation()->getResult(0)),
vh21 = ValueHandle(ih2.getOperation()->getResult(1)),
// CHECK: affine.for %i0 {{.*}}
// CHECK: affine.for %i1 {{.*}}
// CHECK: {{.*}} = "my_custom_op"{{.*}} : (index, index) -> index
- // CHECK: "my_custom_inst_0"{{.*}} : (index, index) -> ()
- // CHECK: [[TWO:%[a-z0-9]+]] = "my_custom_inst_2"{{.*}} : (index, index) -> (index, index)
+ // CHECK: "my_custom_op_0"{{.*}} : (index, index) -> ()
+ // CHECK: [[TWO:%[a-z0-9]+]] = "my_custom_op_2"{{.*}} : (index, index) -> (index, index)
// CHECK: {{.*}} = "my_custom_op"([[TWO]]#0, [[TWO]]#1) : (index, index) -> index
// clang-format on
f->print(llvm::outs());
// -----
-func @non_instruction() {
+func @non_operation() {
asd // expected-error {{custom op 'asd' is unknown}}
}
// CHECK-LABEL: func @ops(%arg0: !llvm<"i32">, %arg1: !llvm<"float">)
func @ops(%arg0 : !llvm<"i32">, %arg1 : !llvm<"float">) {
-// Integer artithmetics binary instructions.
+// Integer arithmetic binary operations.
//
// CHECK-NEXT: %0 = "llvm.add"(%arg0, %arg0) : (!llvm<"i32">, !llvm<"i32">) -> !llvm<"i32">
// CHECK-NEXT: %1 = "llvm.sub"(%arg0, %arg0) : (!llvm<"i32">, !llvm<"i32">) -> !llvm<"i32">
%6 = "llvm.srem"(%arg0, %arg0) : (!llvm<"i32">, !llvm<"i32">) -> !llvm<"i32">
%7 = "llvm.icmp"(%arg0, %arg0) {predicate: 1} : (!llvm<"i32">, !llvm<"i32">) -> !llvm<"i1">
-// Floating point binary instructions.
+// Floating point binary operations.
//
// CHECK-NEXT: %8 = "llvm.fadd"(%arg1, %arg1) : (!llvm<"float">, !llvm<"float">) -> !llvm<"float">
// CHECK-NEXT: %9 = "llvm.fsub"(%arg1, %arg1) : (!llvm<"float">, !llvm<"float">) -> !llvm<"float">
%11 = "llvm.fdiv"(%arg1, %arg1) : (!llvm<"float">, !llvm<"float">) -> !llvm<"float">
%12 = "llvm.frem"(%arg1, %arg1) : (!llvm<"float">, !llvm<"float">) -> !llvm<"float">
-// Memory-related instructions.
+// Memory-related operations.
//
// CHECK-NEXT: %13 = "llvm.alloca"(%arg0) : (!llvm<"i32">) -> !llvm<"double*">
// CHECK-NEXT: %14 = "llvm.getelementptr"(%13, %arg0, %arg0) : (!llvm<"double*">, !llvm<"i32">, !llvm<"i32">) -> !llvm<"double*">
"llvm.store"(%15, %13) : (!llvm<"double">, !llvm<"double*">) -> ()
%16 = "llvm.bitcast"(%13) : (!llvm<"double*">) -> !llvm<"i64*">
-// Function call-related instructions.
+// Function call-related operations.
//
// CHECK-NEXT: %17 = "llvm.call"(%arg0) {callee: @foo : (!llvm<"i32">) -> !llvm<"{ i32, double, i32 }">} : (!llvm<"i32">) -> !llvm<"{ i32, double, i32 }">
// CHECK-NEXT: %18 = "llvm.extractvalue"(%17) {position: [0]} : (!llvm<"{ i32, double, i32 }">) -> !llvm<"i32">
%18 = "llvm.extractvalue"(%17) {position: [0]} : (!llvm<"{ i32, double, i32 }">) -> !llvm<"i32">
%19 = "llvm.insertvalue"(%17, %18) {position: [2]} : (!llvm<"{ i32, double, i32 }">, !llvm<"i32">) -> !llvm<"{ i32, double, i32 }">
-// Terminator instructions and their successors.
+// Terminator operations and their successors.
//
// CHECK: "llvm.br"()[^bb1] : () -> ()
"llvm.br"()[^bb1] : () -> ()
%20 = "llvm.pseudo.undef"() : () -> !llvm<"{ i32, double, i32 }">
%21 = "llvm.pseudo.constant"() {value: 42} : () -> !llvm<"i47">
-// Misc instructions.
+// Misc operations.
// CHECK: %22 = "llvm.select"(%7, %0, %1) : (!llvm<"i1">, !llvm<"i32">, !llvm<"i32">) -> !llvm<"i32">
// CHECK-NEXT: "llvm.return"() : () -> ()
%22 = "llvm.select"(%7, %0, %1) : (!llvm<"i1">, !llvm<"i32">, !llvm<"i32">) -> !llvm<"i32">
return
}
// There are three regions here - the 'load' preceding the loop, the loop
-// itself, and the instructions appearing after the loop.
+// itself, and the operations appearing after the loop.
// CHECK: %0 = alloc() : memref<256xf32>
// CHECK-NEXT: %1 = alloc() : memref<1xf32, 2>
// CHECK-NEXT: %2 = alloc() : memref<1xi32>
//===---------------------------------------------------------------------===//
// Test lowering of Euclidean (floor) division, ceil division and modulo
// operation used in affine expressions. In addition to testing the
-// instruction-level output, check that the obtained results are correct by
+// operation-level output, check that the obtained results are correct by
// applying constant folding transformation after affine lowering.
//===---------------------------------------------------------------------===//
memref<32 x 32 x f32, 2>, memref<1 x i32>
dma_wait %tag[%zero], %num_elt : memref<1 x i32>
}
- // Use live out of 'affine.for' inst; no DMA pipelining will be done.
+ // Use live out of 'affine.for' op; no DMA pipelining will be done.
%v = load %Av[%zero, %zero] : memref<32 x 32 x f32, 2>
return %v : f32
// CHECK: %{{[0-9]+}} = load %{{[0-9]+}}[%c0, %c0] : memref<32x32xf32, 2>
// CHECK: struct GeneratedConvert0 : public RewritePattern
// CHECK: RewritePattern("x.add", 1, context)
-// CHECK: PatternMatchResult match(Instruction *
-// CHECK: void rewrite(Instruction *op, std::unique_ptr<PatternState>
+// CHECK: PatternMatchResult match(Operation *
+// CHECK: void rewrite(Operation *op, std::unique_ptr<PatternState>
// CHECK: PatternRewriter &rewriter)
// CHECK: rewriter.create<Y::AddOp>(loc, op->getResult(0)->getType()
// CHECK: void populateWithGenerated
// CHECK: using Op::Op;
// CHECK: static StringRef getOperationName();
// CHECK: Value *a();
-// CHECK: Instruction::operand_range b();
+// CHECK: Operation::operand_range b();
// CHECK: Value *r();
// CHECK: APInt attr1();
// CHECK: Optional< APFloat > attr2();
def : Pat<(Y_AddOp $lhs, $rhs, $attr1), (Y_AddOp $lhs, $rhs, (T_Compose_Attr $attr1, T_Const_Attr:$attr2))>;
// CHECK: struct GeneratedConvert0 : public RewritePattern
// CHECK: RewritePattern("y.add", 1, context)
-// CHECK: PatternMatchResult match(Instruction *
-// CHECK: void rewrite(Instruction *op, std::unique_ptr<PatternState>
+// CHECK: PatternMatchResult match(Operation *
+// CHECK: void rewrite(Operation *op, std::unique_ptr<PatternState>
// CHECK-NEXT: PatternRewriter &rewriter)
// CHECK: auto vAddOp0 = rewriter.create<Y::AddOp>(loc, op->getResult(0)->getType(),
// CHECK-NEXT: s.lhs,
def : Pat<(Z_AddOp $lhs, $rhs, $attr1, $attr2), (Y_AddOp $lhs, $rhs, (T_Compose_Attr $attr1, $attr2))>;
// CHECK: struct GeneratedConvert1 : public RewritePattern
// CHECK: RewritePattern("z.add", 1, context)
-// CHECK: PatternMatchResult match(Instruction *
-// CHECK: void rewrite(Instruction *op, std::unique_ptr<PatternState>
+// CHECK: PatternMatchResult match(Operation *
+// CHECK: void rewrite(Operation *op, std::unique_ptr<PatternState>
// CHECK-NEXT: PatternRewriter &rewriter)
// CHECK: auto vAddOp0 = rewriter.create<Y::AddOp>(loc, op->getResult(0)->getType(),
// CHECK-NEXT: s.lhs,
bs << "op.getResult()->getType().cast<LLVM::LLVMType>()."
"getUnderlyingType()";
} else if (name == "_hasResult") {
- bs << "inst.getNumResults() == 1";
+ bs << "opInst.getNumResults() == 1";
} else if (name == "_location") {
- bs << "inst.getLoc()";
+ bs << "opInst.getLoc()";
} else if (name == "_numOperands") {
- bs << "inst.getNumOperands()";
+ bs << "opInst.getNumOperands()";
} else if (name == "$") {
bs << '$';
} else {
}
// Output the check and the rewritten builder string.
- os << "if (auto op = inst.dyn_cast<" << op.getQualCppClassName()
+ os << "if (auto op = opInst.dyn_cast<" << op.getQualCppClassName()
<< ">()) {\n";
os << bs.str() << builderStrRef << "\n";
os << " return false;\n";
assert(getOperation()->getNumOperands() >= {0});
return {std::next(operand_begin(), {0}), operand_end()};
)";
- auto &m = opClass.newMethod("Instruction::operand_range", operand.name);
+ auto &m = opClass.newMethod("Operation::operand_range", operand.name);
m.body() << formatv(code, i);
}
}
int indent = 4 + 2 * depth;
// Skip the operand matching at depth 0 as the pattern rewriter already does.
if (depth != 0) {
- // Skip if there is no defining instruction (e.g., arguments to function).
+ // Skip if there is no defining operation (e.g., arguments to function).
os.indent(indent) << formatv("if (!op{0}) return matchFailure();\n", depth);
os.indent(indent) << formatv(
"if (!op{0}->isa<{1}>()) return matchFailure();\n", depth,
void PatternEmitter::emitMatchMethod(DagNode tree) {
// Emit the heading.
os << R"(
- PatternMatchResult match(Instruction *op0) const override {
+ PatternMatchResult match(Operation *op0) const override {
auto ctx = op0->getContext(); (void)ctx;
auto state = llvm::make_unique<MatchedState>();)"
<< "\n";
}
for (auto &res : pattern.getSourcePatternBoundResults())
- os.indent(4) << formatv("mlir::Instruction* {0}; (void){0};\n",
+ os.indent(4) << formatv("mlir::Operation* {0}; (void){0};\n",
resultName(res.first()));
emitOpMatch(tree, 0);
PrintFatalError(loc, "must provide at least one result pattern");
os << R"(
- void rewrite(Instruction *op, std::unique_ptr<PatternState> state,
+ void rewrite(Operation *op, std::unique_ptr<PatternState> state,
PatternRewriter &rewriter) const override {
auto& s = *static_cast<MatchedState *>(state.get());
auto loc = op->getLoc(); (void)loc;
builder.getIntegerType(16));
Value *operand = useOp->getResult(0);
- // Create a non-resizable instruction with one operand.
+ // Create a non-resizable operation with one operand.
Operation *user = createOp(&context, /*resizableOperands=*/false, operand,
builder.getIntegerType(16));
user->setOperands(llvm::None);
EXPECT_EQ(user->getNumOperands(), 0);
- // Destroy the instructions.
+ // Destroy the operations.
user->destroy();
useOp->destroy();
}
builder.getIntegerType(16));
Value *operand = useOp->getResult(0);
- // Create a non-resizable instruction with one operand.
+ // Create a non-resizable operation with one operand.
Operation *user = createOp(&context, /*resizableOperands=*/false, operand,
builder.getIntegerType(16));
// Sanity check the storage.
EXPECT_EQ(user->hasResizableOperandsList(), false);
- // Adding operands to a non resizable instruction should result in a failure.
+ // Adding operands to a non-resizable operation should result in a failure.
ASSERT_DEATH(user->setOperands({operand, operand}), "");
}
builder.getIntegerType(16));
Value *operand = useOp->getResult(0);
- // Create a resizable instruction with one operand.
+ // Create a resizable operation with one operand.
Operation *user = createOp(&context, /*resizableOperands=*/true, operand,
builder.getIntegerType(16));
user->setOperands({operand, operand, operand});
EXPECT_EQ(user->getNumOperands(), 3);
- // Destroy the instructions.
+ // Destroy the operations.
user->destroy();
useOp->destroy();
}