Summary: These were temporary methods used to simplify the transition.
Reviewed By: antiagainst
Differential Revision: https://reviews.llvm.org/D72548
For example, we can write
```tablegen
-def HasNoUseOf: Constraint<
- CPred<"$_self->use_begin() == $_self->use_end()">, "has no use">;
+def HasNoUseOf: Constraint<CPred<"$_self.use_empty()">, "has no use">;
def HasSameElementType : Constraint<
CPred<"$0.cast<ShapedType>().getElementType() == "
we want the constraints on each type definition to read naturally and we want
to attach type constraints directly to an operand/result, `$_self` will be
replaced by the operand/result's type. E.g., for `F32` in `F32:$operand`, its
- `$_self` will be expanded as `getOperand(...)->getType()`.
+ `$_self` will be expanded as `getOperand(...).getType()`.
TODO(b/130663252): Reconsider the leading symbol for special placeholders.
Eventually we want to allow referencing operand/result $-names; such $-names
```tablegen
def createTFLLeakyRelu : NativeCodeCall<
- "createTFLLeakyRelu($_builder, $0->getDefiningOp(), $1, $2)">;
+ "createTFLLeakyRelu($_builder, $0.getDefiningOp(), $1, $2)">;
def : Pat<(TF_LeakyReluOp:$old_value, $arg, F32Attr:$a),
(createTFLLeakyRelu $old_value, $arg, $a)>;
static Value createTFLLeakyRelu(PatternRewriter &rewriter, Operation *op,
Value operand, Attribute attr) {
return rewriter.create<mlir::TFL::LeakyReluOp>(
- op->getLoc(), operands[0]->getType(), /*arg=*/operands[0],
+ op->getLoc(), operands[0].getType(), /*arg=*/operands[0],
/*alpha=*/attrs[0].cast<FloatAttr>());
}
```
void rewrite(Operation *op, PatternRewriter &rewriter) const override {
rewriter.replaceOpWithNewOp<TFL::LeakyReluOp>(
- op, op->getResult(0)->getType(), op->getOperand(0),
+ op, op->getResult(0).getType(), op->getOperand(0),
/*alpha=*/op->getAttrOfType<FloatAttr>("alpha"));
}
};
PatternMatchResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {
rewriter.replaceOpWithNewOp<TFL::LeakyReluOp>(
- op, op->getResult(0)->getType(), op->getOperand(0),
+ op, op->getResult(0).getType(), op->getOperand(0),
/*alpha=*/op->getAttrOfType<FloatAttr>("alpha"));
return matchSuccess();
}
// Look through the input of the current transpose.
mlir::Value transposeInput = op.getOperand();
TransposeOp transposeInputOp =
- llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
+ llvm::dyn_cast_or_null<TransposeOp>(transposeInput.getDefiningOp());
// If the input is defined by another Transpose, bingo!
if (!transposeInputOp)
return matchFailure();
i.e. when the input and output shapes are identical.
```tablegen
-def TypesAreIdentical : Constraint<CPred<"$0->getType() == $1->getType()">>;
+def TypesAreIdentical : Constraint<CPred<"$0.getType() == $1.getType()">>;
def RedundantReshapeOptPattern : Pat<
(ReshapeOp:$res $arg), (replaceWithValue $arg),
[(TypesAreIdentical $res, $arg)]>;
eliminating the reshape operation.
```tablegen
-def ReshapeConstant : NativeCodeCall<"$0.reshape(($1->getType()).cast<ShapedType>())">;
+def ReshapeConstant : NativeCodeCall<"$0.reshape(($1.getType()).cast<ShapedType>())">;
def FoldConstantReshapeOptPattern : Pat<
(ReshapeOp:$res (ConstantOp $arg)),
(ConstantOp (ReshapeConstant $arg, $res))>;
// Replace the values directly with the return operands.
assert(returnOp.getNumOperands() == valuesToRepl.size());
for (const auto &it : llvm::enumerate(returnOp.getOperands()))
- valuesToRepl[it.index()]->replaceAllUsesWith(it.value());
+ valuesToRepl[it.index()].replaceAllUsesWith(it.value());
}
};
```
```c++
/// Infer the output shape of the MulOp, this is required by the shape inference
/// interface.
-void MulOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
+void MulOp::inferShapes() { getResult().setType(getOperand(0).getType()); }
```
At this point, each of the necessary Toy operations provides a mechanism by which
static mlir::LogicalResult verify(ConstantOp op) {
// If the return type of the constant is not an unranked tensor, the shape
// must match the shape of the attribute holding the data.
- auto resultType =
- op.getResult()->getType().dyn_cast<mlir::RankedTensorType>();
+ auto resultType = op.getResult().getType().dyn_cast<mlir::RankedTensorType>();
if (!resultType)
return success();
}
static mlir::LogicalResult verify(TransposeOp op) {
- auto inputType = op.getOperand()->getType().dyn_cast<RankedTensorType>();
+ auto inputType = op.getOperand().getType().dyn_cast<RankedTensorType>();
auto resultType = op.getType().dyn_cast<RankedTensorType>();
if (!inputType || !resultType)
return mlir::success();
static mlir::LogicalResult verify(ConstantOp op) {
// If the return type of the constant is not an unranked tensor, the shape
// must match the shape of the attribute holding the data.
- auto resultType =
- op.getResult()->getType().dyn_cast<mlir::RankedTensorType>();
+ auto resultType = op.getResult().getType().dyn_cast<mlir::RankedTensorType>();
if (!resultType)
return success();
}
static mlir::LogicalResult verify(TransposeOp op) {
- auto inputType = op.getOperand()->getType().dyn_cast<RankedTensorType>();
+ auto inputType = op.getOperand().getType().dyn_cast<RankedTensorType>();
auto resultType = op.getType().dyn_cast<RankedTensorType>();
if (!inputType || !resultType)
return mlir::success();
// Look through the input of the current transpose.
mlir::Value transposeInput = op.getOperand();
TransposeOp transposeInputOp =
- llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
+ llvm::dyn_cast_or_null<TransposeOp>(transposeInput.getDefiningOp());
// If the input is defined by another Transpose, bingo!
if (!transposeInputOp)
// Reshape(Constant(x)) = x'
def ReshapeConstant :
- NativeCodeCall<"$0.reshape(($1->getType()).cast<ShapedType>())">;
+ NativeCodeCall<"$0.reshape(($1.getType()).cast<ShapedType>())">;
def FoldConstantReshapeOptPattern : Pat<
(ReshapeOp:$res (ConstantOp $arg)),
(ConstantOp (ReshapeConstant $arg, $res))>;
// on operand properties.
// Reshape(x) = x, where input and output shapes are identical
-def TypesAreIdentical : Constraint<CPred<"$0->getType() == $1->getType()">>;
+def TypesAreIdentical : Constraint<CPred<"$0.getType() == $1.getType()">>;
def RedundantReshapeOptPattern : Pat<
(ReshapeOp:$res $arg), (replaceWithValue $arg),
[(TypesAreIdentical $res, $arg)]>;
// Replace the values directly with the return operands.
assert(returnOp.getNumOperands() == valuesToRepl.size());
for (const auto &it : llvm::enumerate(returnOp.getOperands()))
- valuesToRepl[it.index()]->replaceAllUsesWith(it.value());
+ valuesToRepl[it.index()].replaceAllUsesWith(it.value());
}
/// Attempts to materialize a conversion for a type mismatch between a call
static mlir::LogicalResult verify(ConstantOp op) {
// If the return type of the constant is not an unranked tensor, the shape
// must match the shape of the attribute holding the data.
- auto resultType =
- op.getResult()->getType().dyn_cast<mlir::RankedTensorType>();
+ auto resultType = op.getResult().getType().dyn_cast<mlir::RankedTensorType>();
if (!resultType)
return success();
/// Infer the output shape of the AddOp, this is required by the shape inference
/// interface.
-void AddOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
+void AddOp::inferShapes() { getResult().setType(getOperand(0).getType()); }
//===----------------------------------------------------------------------===//
// CastOp
/// Infer the output shape of the CastOp, this is required by the shape
/// inference interface.
-void CastOp::inferShapes() { getResult()->setType(getOperand()->getType()); }
+void CastOp::inferShapes() { getResult().setType(getOperand().getType()); }
//===----------------------------------------------------------------------===//
// GenericCallOp
/// Infer the output shape of the MulOp, this is required by the shape inference
/// interface.
-void MulOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
+void MulOp::inferShapes() { getResult().setType(getOperand(0).getType()); }
//===----------------------------------------------------------------------===//
// ReturnOp
}
void TransposeOp::inferShapes() {
- auto arrayTy = getOperand()->getType().cast<RankedTensorType>();
+ auto arrayTy = getOperand().getType().cast<RankedTensorType>();
SmallVector<int64_t, 2> dims(llvm::reverse(arrayTy.getShape()));
- getResult()->setType(RankedTensorType::get(dims, arrayTy.getElementType()));
+ getResult().setType(RankedTensorType::get(dims, arrayTy.getElementType()));
}
static mlir::LogicalResult verify(TransposeOp op) {
- auto inputType = op.getOperand()->getType().dyn_cast<RankedTensorType>();
+ auto inputType = op.getOperand().getType().dyn_cast<RankedTensorType>();
auto resultType = op.getType().dyn_cast<RankedTensorType>();
if (!inputType || !resultType)
return mlir::success();
// Look through the input of the current transpose.
mlir::Value transposeInput = op.getOperand();
TransposeOp transposeInputOp =
- llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
+ llvm::dyn_cast_or_null<TransposeOp>(transposeInput.getDefiningOp());
// If the input is defined by another Transpose, bingo!
if (!transposeInputOp)
// Reshape(Constant(x)) = x'
def ReshapeConstant :
- NativeCodeCall<"$0.reshape(($1->getType()).cast<ShapedType>())">;
+ NativeCodeCall<"$0.reshape(($1.getType()).cast<ShapedType>())">;
def FoldConstantReshapeOptPattern : Pat<
(ReshapeOp:$res (ConstantOp $arg)),
(ConstantOp (ReshapeConstant $arg, $res))>;
// on operand properties.
// Reshape(x) = x, where input and output shapes are identical
-def TypesAreIdentical : Constraint<CPred<"$0->getType() == $1->getType()">>;
+def TypesAreIdentical : Constraint<CPred<"$0.getType() == $1.getType()">>;
def RedundantReshapeOptPattern : Pat<
(ReshapeOp:$res $arg), (replaceWithValue $arg),
[(TypesAreIdentical $res, $arg)]>;
// Replace the values directly with the return operands.
assert(returnOp.getNumOperands() == valuesToRepl.size());
for (const auto &it : llvm::enumerate(returnOp.getOperands()))
- valuesToRepl[it.index()]->replaceAllUsesWith(it.value());
+ valuesToRepl[it.index()].replaceAllUsesWith(it.value());
}
/// Attempts to materialize a conversion for a type mismatch between a call
static mlir::LogicalResult verify(ConstantOp op) {
// If the return type of the constant is not an unranked tensor, the shape
// must match the shape of the attribute holding the data.
- auto resultType =
- op.getResult()->getType().dyn_cast<mlir::RankedTensorType>();
+ auto resultType = op.getResult().getType().dyn_cast<mlir::RankedTensorType>();
if (!resultType)
return success();
/// Infer the output shape of the AddOp, this is required by the shape inference
/// interface.
-void AddOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
+void AddOp::inferShapes() { getResult().setType(getOperand(0).getType()); }
//===----------------------------------------------------------------------===//
// CastOp
/// Infer the output shape of the CastOp, this is required by the shape
/// inference interface.
-void CastOp::inferShapes() { getResult()->setType(getOperand()->getType()); }
+void CastOp::inferShapes() { getResult().setType(getOperand().getType()); }
//===----------------------------------------------------------------------===//
// GenericCallOp
/// Infer the output shape of the MulOp, this is required by the shape inference
/// interface.
-void MulOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
+void MulOp::inferShapes() { getResult().setType(getOperand(0).getType()); }
//===----------------------------------------------------------------------===//
// ReturnOp
}
void TransposeOp::inferShapes() {
- auto arrayTy = getOperand()->getType().cast<RankedTensorType>();
+ auto arrayTy = getOperand().getType().cast<RankedTensorType>();
SmallVector<int64_t, 2> dims(llvm::reverse(arrayTy.getShape()));
- getResult()->setType(RankedTensorType::get(dims, arrayTy.getElementType()));
+ getResult().setType(RankedTensorType::get(dims, arrayTy.getElementType()));
}
static mlir::LogicalResult verify(TransposeOp op) {
- auto inputType = op.getOperand()->getType().dyn_cast<RankedTensorType>();
+ auto inputType = op.getOperand().getType().dyn_cast<RankedTensorType>();
auto resultType = op.getType().dyn_cast<RankedTensorType>();
if (!inputType || !resultType)
return mlir::success();
// Look through the input of the current transpose.
mlir::Value transposeInput = op.getOperand();
TransposeOp transposeInputOp =
- llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
+ llvm::dyn_cast_or_null<TransposeOp>(transposeInput.getDefiningOp());
// If the input is defined by another Transpose, bingo!
if (!transposeInputOp)
// Reshape(Constant(x)) = x'
def ReshapeConstant :
- NativeCodeCall<"$0.reshape(($1->getType()).cast<ShapedType>())">;
+ NativeCodeCall<"$0.reshape(($1.getType()).cast<ShapedType>())">;
def FoldConstantReshapeOptPattern : Pat<
(ReshapeOp:$res (ConstantOp $arg)),
(ConstantOp (ReshapeConstant $arg, $res))>;
// on operand properties.
// Reshape(x) = x, where input and output shapes are identical
-def TypesAreIdentical : Constraint<CPred<"$0->getType() == $1->getType()">>;
+def TypesAreIdentical : Constraint<CPred<"$0.getType() == $1.getType()">>;
def RedundantReshapeOptPattern : Pat<
(ReshapeOp:$res $arg), (replaceWithValue $arg),
[(TypesAreIdentical $res, $arg)]>;
// Replace the values directly with the return operands.
assert(returnOp.getNumOperands() == valuesToRepl.size());
for (const auto &it : llvm::enumerate(returnOp.getOperands()))
- valuesToRepl[it.index()]->replaceAllUsesWith(it.value());
+ valuesToRepl[it.index()].replaceAllUsesWith(it.value());
}
/// Attempts to materialize a conversion for a type mismatch between a call
static mlir::LogicalResult verify(ConstantOp op) {
// If the return type of the constant is not an unranked tensor, the shape
// must match the shape of the attribute holding the data.
- auto resultType =
- op.getResult()->getType().dyn_cast<mlir::RankedTensorType>();
+ auto resultType = op.getResult().getType().dyn_cast<mlir::RankedTensorType>();
if (!resultType)
return success();
/// Infer the output shape of the AddOp, this is required by the shape inference
/// interface.
-void AddOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
+void AddOp::inferShapes() { getResult().setType(getOperand(0).getType()); }
//===----------------------------------------------------------------------===//
// CastOp
/// Infer the output shape of the CastOp, this is required by the shape
/// inference interface.
-void CastOp::inferShapes() { getResult()->setType(getOperand()->getType()); }
+void CastOp::inferShapes() { getResult().setType(getOperand().getType()); }
//===----------------------------------------------------------------------===//
// GenericCallOp
/// Infer the output shape of the MulOp, this is required by the shape inference
/// interface.
-void MulOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
+void MulOp::inferShapes() { getResult().setType(getOperand(0).getType()); }
//===----------------------------------------------------------------------===//
// ReturnOp
}
void TransposeOp::inferShapes() {
- auto arrayTy = getOperand()->getType().cast<RankedTensorType>();
+ auto arrayTy = getOperand().getType().cast<RankedTensorType>();
SmallVector<int64_t, 2> dims(llvm::reverse(arrayTy.getShape()));
- getResult()->setType(RankedTensorType::get(dims, arrayTy.getElementType()));
+ getResult().setType(RankedTensorType::get(dims, arrayTy.getElementType()));
}
static mlir::LogicalResult verify(TransposeOp op) {
- auto inputType = op.getOperand()->getType().dyn_cast<RankedTensorType>();
+ auto inputType = op.getOperand().getType().dyn_cast<RankedTensorType>();
auto resultType = op.getType().dyn_cast<RankedTensorType>();
if (!inputType || !resultType)
return mlir::success();
// Look through the input of the current transpose.
mlir::Value transposeInput = op.getOperand();
TransposeOp transposeInputOp =
- llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
+ llvm::dyn_cast_or_null<TransposeOp>(transposeInput.getDefiningOp());
// If the input is defined by another Transpose, bingo!
if (!transposeInputOp)
// Reshape(Constant(x)) = x'
def ReshapeConstant :
- NativeCodeCall<"$0.reshape(($1->getType()).cast<ShapedType>())">;
+ NativeCodeCall<"$0.reshape(($1.getType()).cast<ShapedType>())">;
def FoldConstantReshapeOptPattern : Pat<
(ReshapeOp:$res (ConstantOp $arg)),
(ConstantOp (ReshapeConstant $arg, $res))>;
// on operand properties.
// Reshape(x) = x, where input and output shapes are identical
-def TypesAreIdentical : Constraint<CPred<"$0->getType() == $1->getType()">>;
+def TypesAreIdentical : Constraint<CPred<"$0.getType() == $1.getType()">>;
def RedundantReshapeOptPattern : Pat<
(ReshapeOp:$res $arg), (replaceWithValue $arg),
[(TypesAreIdentical $res, $arg)]>;
// Replace the values directly with the return operands.
assert(returnOp.getNumOperands() == valuesToRepl.size());
for (const auto &it : llvm::enumerate(returnOp.getOperands()))
- valuesToRepl[it.index()]->replaceAllUsesWith(it.value());
+ valuesToRepl[it.index()].replaceAllUsesWith(it.value());
}
/// Attempts to materialize a conversion for a type mismatch between a call
/// Verifier for the constant operation. This corresponds to the `::verify(...)`
/// in the op definition.
static mlir::LogicalResult verify(ConstantOp op) {
- return verifyConstantForType(op.getResult()->getType(), op.value(), op);
+ return verifyConstantForType(op.getResult().getType(), op.value(), op);
}
static mlir::LogicalResult verify(StructConstantOp op) {
- return verifyConstantForType(op.getResult()->getType(), op.value(), op);
+ return verifyConstantForType(op.getResult().getType(), op.value(), op);
}
/// Infer the output shape of the ConstantOp, this is required by the shape
/// inference interface.
-void ConstantOp::inferShapes() { getResult()->setType(value().getType()); }
+void ConstantOp::inferShapes() { getResult().setType(value().getType()); }
//===----------------------------------------------------------------------===//
// AddOp
/// Infer the output shape of the AddOp, this is required by the shape inference
/// interface.
-void AddOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
+void AddOp::inferShapes() { getResult().setType(getOperand(0).getType()); }
//===----------------------------------------------------------------------===//
// CastOp
/// Infer the output shape of the CastOp, this is required by the shape
/// inference interface.
-void CastOp::inferShapes() { getResult()->setType(getOperand()->getType()); }
+void CastOp::inferShapes() { getResult().setType(getOperand().getType()); }
//===----------------------------------------------------------------------===//
// GenericCallOp
/// Infer the output shape of the MulOp, this is required by the shape inference
/// interface.
-void MulOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
+void MulOp::inferShapes() { getResult().setType(getOperand(0).getType()); }
//===----------------------------------------------------------------------===//
// ReturnOp
void StructAccessOp::build(mlir::Builder *b, mlir::OperationState &state,
mlir::Value input, size_t index) {
// Extract the result type from the input type.
- StructType structTy = input->getType().cast<StructType>();
+ StructType structTy = input.getType().cast<StructType>();
assert(index < structTy.getNumElementTypes());
mlir::Type resultType = structTy.getElementTypes()[index];
}
static mlir::LogicalResult verify(StructAccessOp op) {
- StructType structTy = op.input()->getType().cast<StructType>();
+ StructType structTy = op.input().getType().cast<StructType>();
size_t index = op.index().getZExtValue();
if (index >= structTy.getNumElementTypes())
return op.emitOpError()
<< "index should be within the range of the input struct type";
- mlir::Type resultType = op.getResult()->getType();
+ mlir::Type resultType = op.getResult().getType();
if (resultType != structTy.getElementTypes()[index])
return op.emitOpError() << "must have the same result type as the struct "
"element referred to by the index";
}
void TransposeOp::inferShapes() {
- auto arrayTy = getOperand()->getType().cast<RankedTensorType>();
+ auto arrayTy = getOperand().getType().cast<RankedTensorType>();
SmallVector<int64_t, 2> dims(llvm::reverse(arrayTy.getShape()));
- getResult()->setType(RankedTensorType::get(dims, arrayTy.getElementType()));
+ getResult().setType(RankedTensorType::get(dims, arrayTy.getElementType()));
}
static mlir::LogicalResult verify(TransposeOp op) {
- auto inputType = op.getOperand()->getType().dyn_cast<RankedTensorType>();
+ auto inputType = op.getOperand().getType().dyn_cast<RankedTensorType>();
auto resultType = op.getType().dyn_cast<RankedTensorType>();
if (!inputType || !resultType)
return mlir::success();
mlir::Type type = getType(varType, vardecl.loc());
if (!type)
return nullptr;
- if (type != value->getType()) {
+ if (type != value.getType()) {
emitError(loc(vardecl.loc()))
<< "struct type of initializer is different than the variable "
"declaration. Got "
- << value->getType() << ", but expected " << type;
+ << value.getType() << ", but expected " << type;
return nullptr;
}
// Look through the input of the current transpose.
mlir::Value transposeInput = op.getOperand();
TransposeOp transposeInputOp =
- llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
+ llvm::dyn_cast_or_null<TransposeOp>(transposeInput.getDefiningOp());
// If the input is defined by another Transpose, bingo!
if (!transposeInputOp)
// Reshape(Constant(x)) = x'
def ReshapeConstant :
- NativeCodeCall<"$0.reshape(($1->getType()).cast<ShapedType>())">;
+ NativeCodeCall<"$0.reshape(($1.getType()).cast<ShapedType>())">;
def FoldConstantReshapeOptPattern : Pat<
(ReshapeOp:$res (ConstantOp $arg)),
(ConstantOp (ReshapeConstant $arg, $res))>;
// on operand properties.
// Reshape(x) = x, where input and output shapes are identical
-def TypesAreIdentical : Constraint<CPred<"$0->getType() == $1->getType()">>;
+def TypesAreIdentical : Constraint<CPred<"$0.getType() == $1.getType()">>;
def RedundantReshapeOptPattern : Pat<
(ReshapeOp:$res $arg), (replaceWithValue $arg),
[(TypesAreIdentical $res, $arg)]>;
/// Return true if operation A dominates operation B.
bool dominates(Value a, Operation *b) {
- return (Operation *)a->getDefiningOp() == b || properlyDominates(a, b);
+ return (Operation *)a.getDefiningOp() == b || properlyDominates(a, b);
}
/// Return true if the specified block A dominates block B.
/// Returns the source MemRefType for this DMA operation.
Value getSrcMemRef() { return getOperand(getSrcMemRefOperandIndex()); }
MemRefType getSrcMemRefType() {
- return getSrcMemRef()->getType().cast<MemRefType>();
+ return getSrcMemRef().getType().cast<MemRefType>();
}
/// Returns the rank (number of indices) of the source MemRefType.
/// Returns the memory space of the src memref.
unsigned getSrcMemorySpace() {
- return getSrcMemRef()->getType().cast<MemRefType>().getMemorySpace();
+ return getSrcMemRef().getType().cast<MemRefType>().getMemorySpace();
}
/// Returns the operand index of the dst memref.
/// Returns the destination MemRefType for this DMA operations.
Value getDstMemRef() { return getOperand(getDstMemRefOperandIndex()); }
MemRefType getDstMemRefType() {
- return getDstMemRef()->getType().cast<MemRefType>();
+ return getDstMemRef().getType().cast<MemRefType>();
}
/// Returns the rank (number of indices) of the destination MemRefType.
unsigned getDstMemRefRank() {
- return getDstMemRef()->getType().cast<MemRefType>().getRank();
+ return getDstMemRef().getType().cast<MemRefType>().getRank();
}
/// Returns the memory space of the src memref.
unsigned getDstMemorySpace() {
- return getDstMemRef()->getType().cast<MemRefType>().getMemorySpace();
+ return getDstMemRef().getType().cast<MemRefType>().getMemorySpace();
}
/// Returns the affine map used to access the dst memref.
/// Returns the Tag MemRef for this DMA operation.
Value getTagMemRef() { return getOperand(getTagMemRefOperandIndex()); }
MemRefType getTagMemRefType() {
- return getTagMemRef()->getType().cast<MemRefType>();
+ return getTagMemRef().getType().cast<MemRefType>();
}
/// Returns the rank (number of indices) of the tag MemRefType.
unsigned getTagMemRefRank() {
- return getTagMemRef()->getType().cast<MemRefType>().getRank();
+ return getTagMemRef().getType().cast<MemRefType>().getRank();
}
/// Returns the affine map used to access the tag memref.
// Returns the Tag MemRef associated with the DMA operation being waited on.
Value getTagMemRef() { return getOperand(0); }
MemRefType getTagMemRefType() {
- return getTagMemRef()->getType().cast<MemRefType>();
+ return getTagMemRef().getType().cast<MemRefType>();
}
/// Returns the affine map used to access the tag memref.
// Returns the rank (number of indices) of the tag memref.
unsigned getTagMemRefRank() {
- return getTagMemRef()->getType().cast<MemRefType>().getRank();
+ return getTagMemRef().getType().cast<MemRefType>().getRank();
}
/// Returns the AffineMapAttr associated with 'memref'.
Value getMemRef() { return getOperand(getMemRefOperandIndex()); }
void setMemRef(Value value) { setOperand(getMemRefOperandIndex(), value); }
MemRefType getMemRefType() {
- return getMemRef()->getType().cast<MemRefType>();
+ return getMemRef().getType().cast<MemRefType>();
}
/// Get affine map operands.
void setMemRef(Value value) { setOperand(getMemRefOperandIndex(), value); }
MemRefType getMemRefType() {
- return getMemRef()->getType().cast<MemRefType>();
+ return getMemRef().getType().cast<MemRefType>();
}
/// Get affine map operands.
let extraClassDeclaration = [{
MemRefType getMemRefType() {
- return memref()->getType().cast<MemRefType>();
+ return memref().getType().cast<MemRefType>();
}
/// Returns the affine map used to index the memref for this operation.
let builders = [OpBuilder<
"Builder *b, OperationState &result, ICmpPredicate predicate, Value lhs, "
"Value rhs", [{
- LLVMDialect *dialect = &lhs->getType().cast<LLVMType>().getDialect();
+ LLVMDialect *dialect = &lhs.getType().cast<LLVMType>().getDialect();
build(b, result, LLVMType::getInt1Ty(dialect),
b->getI64IntegerAttr(static_cast<int64_t>(predicate)), lhs, rhs);
}]>];
let builders = [OpBuilder<
"Builder *b, OperationState &result, FCmpPredicate predicate, Value lhs, "
"Value rhs", [{
- LLVMDialect *dialect = &lhs->getType().cast<LLVMType>().getDialect();
+ LLVMDialect *dialect = &lhs.getType().cast<LLVMType>().getDialect();
build(b, result, LLVMType::getInt1Ty(dialect),
b->getI64IntegerAttr(static_cast<int64_t>(predicate)), lhs, rhs);
}]>];
let builders = [OpBuilder<
"Builder *b, OperationState &result, Value addr",
[{
- auto type = addr->getType().cast<LLVM::LLVMType>().getPointerElementTy();
+ auto type = addr.getType().cast<LLVM::LLVMType>().getPointerElementTy();
build(b, result, type, addr);
}]>];
let parser = [{ return parseLoadOp(parser, result); }];
"Builder *b, OperationState &result, Value container, Value value, "
"ArrayAttr position",
[{
- build(b, result, container->getType(), container, value, position);
+ build(b, result, container.getType(), container, value, position);
}]>];
let parser = [{ return parseInsertValueOp(parser, result); }];
let printer = [{ printInsertValueOp(p, *this); }];
"Builder *b, OperationState &result, Value v1, Value v2, "
"ArrayAttr mask, ArrayRef<NamedAttribute> attrs = {}">];
let verifier = [{
- auto wrappedVectorType1 = v1()->getType().cast<LLVM::LLVMType>();
- auto wrappedVectorType2 = v2()->getType().cast<LLVM::LLVMType>();
+ auto wrappedVectorType1 = v1().getType().cast<LLVM::LLVMType>();
+ auto wrappedVectorType2 = v2().getType().cast<LLVM::LLVMType>();
if (!wrappedVectorType2.getUnderlyingType()->isVectorTy())
return emitOpError("expected LLVM IR Dialect vector type for operand #2");
if (wrappedVectorType1.getVectorElementType() !=
let builders = [OpBuilder<
"Builder *b, OperationState &result, Value condition, Value lhs, "
"Value rhs", [{
- build(b, result, lhs->getType(), condition, lhs, rhs);
+ build(b, result, lhs.getType(), condition, lhs, rhs);
}]>];
let parser = [{ return parseSelectOp(parser, result); }];
let printer = [{ printSelectOp(p, *this); }];
private:
StructuredIndexed(Value v, ArrayRef<AffineExpr> indexings)
: value(v), exprs(indexings.begin(), indexings.end()) {
- assert(v->getType().isa<MemRefType>() && "MemRefType expected");
+ assert(v.getType().isa<MemRefType>() && "MemRefType expected");
}
StructuredIndexed(ValueHandle v, ArrayRef<AffineExpr> indexings)
: StructuredIndexed(v.getValue(), indexings) {}
Type getElementType() { return getShapedType().getElementType(); }
ShapedType getShapedType() { return getType().cast<ShapedType>(); }
unsigned getBaseViewRank() { return getBaseViewType().getRank(); }
- ShapedType getBaseViewType() { return view()->getType().cast<ShapedType>();}
+ ShapedType getBaseViewType() { return view().getType().cast<ShapedType>();}
// Get the underlying indexing at a given rank.
Value indexing(unsigned rank) { return *(indexings().begin() + rank); }
SmallVector<Value, 8> getRanges() {
SmallVector<Value, 8> res;
for (auto operand : indexings())
- if (!operand->getType().isa<IndexType>())
+ if (!operand.getType().isa<IndexType>())
res.push_back(operand);
return res;
}
let extraClassDeclaration = [{
static StringRef getPermutationAttrName() { return "permutation"; }
- ShapedType getShapedType() { return view()->getType().cast<ShapedType>(); }
+ ShapedType getShapedType() { return view().getType().cast<ShapedType>(); }
}];
}
ArrayAttr indexing_maps();
ArrayAttr iterator_types() {
- unsigned nPar = input()->getType().cast<ShapedType>().getRank();
+ unsigned nPar = input().getType().cast<ShapedType>().getRank();
MLIRContext *ctx = getContext();
SmallVector<Attribute, 8> iters(
nPar, StringAttr::get(getParallelIteratorTypeName(), ctx));
ArrayAttr indexing_maps();
ArrayAttr iterator_types() {
- unsigned nPar = input()->getType().cast<ShapedType>().getRank();
+ unsigned nPar = input().getType().cast<ShapedType>().getRank();
MLIRContext *ctx = getContext();
SmallVector<Attribute, 8> iters(
nPar, StringAttr::get(getParallelIteratorTypeName(), ctx));
}
/// Return the `i`-th input buffer type.
ShapedType getInputShapedType(unsigned i) {
- return getInput(i)->getType().template cast<ShapedType>();
+ return getInput(i).getType().template cast<ShapedType>();
}
/// Return the range over inputs.
Operation::operand_range getInputs() {
}
/// Return the `i`-th output buffer type.
ShapedType getOutputShapedType(unsigned i) {
- return getOutput(i)->getType().template cast<ShapedType>();
+ return getOutput(i).getType().template cast<ShapedType>();
}
/// Query whether the op has only MemRef input and outputs.
bool hasBufferSemantics() {
class HasOperandsOfType<string type>: CPred<[{
llvm::any_of(op.getOperands(),
[](Value v) {
- return dyn_cast_or_null<}] # type # [{>(v->getDefiningOp());
+ return dyn_cast_or_null<}] # type # [{>(v.getDefiningOp());
})
}]>;
SmallVector<Value, 8> getViewSizes(ConcreteOp linalgOp) {
SmallVector<Value, 8> res;
for (auto v : linalgOp.getInputsAndOutputs()) {
- MemRefType t = v->getType().template cast<MemRefType>();
+ MemRefType t = v.getType().template cast<MemRefType>();
for (unsigned i = 0; i < t.getRank(); ++i)
res.push_back(edsc::intrinsics::dim(v, i));
}
let results = (outs quant_RealValueType);
let verifier = [{
- auto tensorArg = arg()->getType().dyn_cast<TensorType>();
+ auto tensorArg = arg().getType().dyn_cast<TensorType>();
if (!tensorArg) return emitOpError("arg needs to be tensor type.");
// Verify layerStats attribute.
Value getSrcMemRef() { return getOperand(0); }
// Returns the rank (number of indices) of the source MemRefType.
unsigned getSrcMemRefRank() {
- return getSrcMemRef()->getType().cast<MemRefType>().getRank();
+ return getSrcMemRef().getType().cast<MemRefType>().getRank();
}
// Returns the source memref indices for this DMA operation.
operand_range getSrcIndices() {
Value getDstMemRef() { return getOperand(1 + getSrcMemRefRank()); }
// Returns the rank (number of indices) of the destination MemRefType.
unsigned getDstMemRefRank() {
- return getDstMemRef()->getType().cast<MemRefType>().getRank();
+ return getDstMemRef().getType().cast<MemRefType>().getRank();
}
unsigned getSrcMemorySpace() {
- return getSrcMemRef()->getType().cast<MemRefType>().getMemorySpace();
+ return getSrcMemRef().getType().cast<MemRefType>().getMemorySpace();
}
unsigned getDstMemorySpace() {
- return getDstMemRef()->getType().cast<MemRefType>().getMemorySpace();
+ return getDstMemRef().getType().cast<MemRefType>().getMemorySpace();
}
// Returns the destination memref indices for this DMA operation.
}
// Returns the rank (number of indices) of the tag MemRefType.
unsigned getTagMemRefRank() {
- return getTagMemRef()->getType().cast<MemRefType>().getRank();
+ return getTagMemRef().getType().cast<MemRefType>().getRank();
}
// Returns the tag memref index for this DMA operation.
// Returns the rank (number of indices) of the tag memref.
unsigned getTagMemRefRank() {
- return getTagMemRef()->getType().cast<MemRefType>().getRank();
+ return getTagMemRef().getType().cast<MemRefType>().getRank();
}
// Returns the number of elements transferred in the associated DMA operation.
let extraClassDeclaration = [{
static StringRef getAlignmentAttrName() { return "alignment"; }
- MemRefType getType() { return getResult()->getType().cast<MemRefType>(); }
+ MemRefType getType() { return getResult().getType().cast<MemRefType>(); }
/// Returns the number of symbolic operands (the ones in square brackets),
/// which bind to the symbols of the memref's layout map.
"ValueRange operands = {}", [{
result.operands.push_back(callee);
result.addOperands(operands);
- result.addTypes(callee->getType().cast<FunctionType>().getResults());
+ result.addTypes(callee.getType().cast<FunctionType>().getResults());
}]>];
let extraClassDeclaration = [{
let builders = [OpBuilder<
"Builder *builder, OperationState &result, Value aggregate,"
"ValueRange indices = {}", [{
- auto resType = aggregate->getType().cast<ShapedType>()
+ auto resType = aggregate.getType().cast<ShapedType>()
.getElementType();
build(builder, result, resType, aggregate, indices);
}]>];
let builders = [OpBuilder<
"Builder *, OperationState &result, Value memref,"
"ValueRange indices = {}", [{
- auto memrefType = memref->getType().cast<MemRefType>();
+ auto memrefType = memref.getType().cast<MemRefType>();
result.addOperands(memref);
result.addOperands(indices);
result.types.push_back(memrefType.getElementType());
Value getMemRef() { return getOperand(0); }
void setMemRef(Value value) { setOperand(0, value); }
MemRefType getMemRefType() {
- return getMemRef()->getType().cast<MemRefType>();
+ return getMemRef().getType().cast<MemRefType>();
}
operand_range getIndices() { return {operand_begin() + 1, operand_end()}; }
static bool areCastCompatible(Type a, Type b);
/// The result of a memref_cast is always a memref.
- Type getType() { return getResult()->getType(); }
+ Type getType() { return getResult().getType(); }
}];
}
let extraClassDeclaration = [{
MemRefType getMemRefType() {
- return memref()->getType().cast<MemRefType>();
+ return memref().getType().cast<MemRefType>();
}
static StringRef getLocalityHintAttrName() { return "localityHint"; }
static StringRef getIsWriteAttrName() { return "isWrite"; }
"Builder *builder, OperationState &result, Value condition,"
"Value trueValue, Value falseValue", [{
result.addOperands({condition, trueValue, falseValue});
- result.addTypes(trueValue->getType());
+ result.addTypes(trueValue.getType());
}]>];
let extraClassDeclaration = [{
Value getMemRef() { return getOperand(1); }
void setMemRef(Value value) { setOperand(1, value); }
MemRefType getMemRefType() {
- return getMemRef()->getType().cast<MemRefType>();
+ return getMemRef().getType().cast<MemRefType>();
}
operand_range getIndices() {
let extraClassDeclaration = [{
/// Returns the type of the base memref operand.
MemRefType getBaseMemRefType() {
- return source()->getType().cast<MemRefType>();
+ return source().getType().cast<MemRefType>();
}
/// The result of a subview is always a memref.
- MemRefType getType() { return getResult()->getType().cast<MemRefType>(); }
+ MemRefType getType() { return getResult().getType().cast<MemRefType>(); }
/// Returns as integer value the number of offset operands.
int64_t getNumOffsets() { return llvm::size(offsets()); }
static bool areCastCompatible(Type a, Type b);
/// The result of a tensor_cast is always a tensor.
- TensorType getType() { return getResult()->getType().cast<TensorType>(); }
+ TensorType getType() { return getResult().getType().cast<TensorType>(); }
}];
}
let builders = [OpBuilder<
"Builder *builder, OperationState &result, Value memref", [{
- auto memrefType = memref->getType().cast<MemRefType>();
+ auto memrefType = memref.getType().cast<MemRefType>();
auto resultType = RankedTensorType::get(memrefType.getShape(),
memrefType.getElementType());
result.addOperands(memref);
let extraClassDeclaration = [{
/// The result of a tensor_load is always a tensor.
- TensorType getType() { return getResult()->getType().cast<TensorType>(); }
+ TensorType getType() { return getResult().getType().cast<TensorType>(); }
}];
}
let extraClassDeclaration = [{
/// The result of a view is always a memref.
- MemRefType getType() { return getResult()->getType().cast<MemRefType>(); }
+ MemRefType getType() { return getResult().getType().cast<MemRefType>(); }
/// Returns the dynamic offset for this view operation if specified.
/// Returns nullptr if no dynamic offset was specified.
"Value acc, ArrayAttr indexingMaps, ArrayAttr iteratorTypes">];
let extraClassDeclaration = [{
VectorType getLhsType() {
- return lhs()->getType().cast<VectorType>();
+ return lhs().getType().cast<VectorType>();
}
VectorType getRhsType() {
- return rhs()->getType().cast<VectorType>();
+ return rhs().getType().cast<VectorType>();
}
VectorType getAccType() {
- return acc()->getType().cast<VectorType>();
+ return acc().getType().cast<VectorType>();
}
VectorType getLHSVectorMaskType() {
if (llvm::size(masks()) != 2) return VectorType();
- return getOperand(3)->getType().cast<VectorType>();
+ return getOperand(3).getType().cast<VectorType>();
}
VectorType getRHSVectorMaskType() {
if (llvm::size(masks()) != 2) return VectorType();
- return getOperand(4)->getType().cast<VectorType>();
+ return getOperand(4).getType().cast<VectorType>();
}
VectorType getResultType() {
- return getResult()->getType().cast<VectorType>();
+ return getResult().getType().cast<VectorType>();
}
ArrayRef<StringRef> getTraitAttrNames();
SmallVector<AffineMap, 4> getIndexingMaps();
```
}];
let extraClassDeclaration = [{
- Type getSourceType() { return source()->getType(); }
+ Type getSourceType() { return source().getType(); }
VectorType getVectorType() {
- return vector()->getType().cast<VectorType>();
+ return vector().getType().cast<VectorType>();
}
}];
}
let extraClassDeclaration = [{
static StringRef getMaskAttrName() { return "mask"; }
VectorType getV1VectorType() {
- return v1()->getType().cast<VectorType>();
+ return v1().getType().cast<VectorType>();
}
VectorType getV2VectorType() {
- return v2()->getType().cast<VectorType>();
+ return v2().getType().cast<VectorType>();
}
VectorType getVectorType() {
- return vector()->getType().cast<VectorType>();
+ return vector().getType().cast<VectorType>();
}
}];
}
}];
let extraClassDeclaration = [{
VectorType getVectorType() {
- return vector()->getType().cast<VectorType>();
+ return vector().getType().cast<VectorType>();
}
}];
}
let extraClassDeclaration = [{
static StringRef getPositionAttrName() { return "position"; }
VectorType getVectorType() {
- return vector()->getType().cast<VectorType>();
+ return vector().getType().cast<VectorType>();
}
}];
}
"ArrayRef<int64_t> strides">];
let extraClassDeclaration = [{
VectorType getSourceVectorType() {
- return vector()->getType().cast<VectorType>();
+ return vector().getType().cast<VectorType>();
}
TupleType getResultTupleType() {
- return getResult()->getType().cast<TupleType>();
+ return getResult().getType().cast<TupleType>();
}
void getSizes(SmallVectorImpl<int64_t> &results);
void getStrides(SmallVectorImpl<int64_t> &results);
```
}];
let extraClassDeclaration = [{
- Type getSourceType() { return source()->getType(); }
+ Type getSourceType() { return source().getType(); }
VectorType getDestVectorType() {
- return dest()->getType().cast<VectorType>();
+ return dest().getType().cast<VectorType>();
}
}];
}
"Value dest, ArrayRef<int64_t>">];
let extraClassDeclaration = [{
static StringRef getPositionAttrName() { return "position"; }
- Type getSourceType() { return source()->getType(); }
+ Type getSourceType() { return source().getType(); }
VectorType getDestVectorType() {
- return dest()->getType().cast<VectorType>();
+ return dest().getType().cast<VectorType>();
}
}];
}
let extraClassDeclaration = [{
TupleType getSourceTupleType() {
- return vectors()->getType().cast<TupleType>();
+ return vectors().getType().cast<TupleType>();
}
VectorType getResultVectorType() {
- return getResult()->getType().cast<VectorType>();
+ return getResult().getType().cast<VectorType>();
}
void getSizes(SmallVectorImpl<int64_t> &results);
void getStrides(SmallVectorImpl<int64_t> &results);
static StringRef getOffsetsAttrName() { return "offsets"; }
static StringRef getStridesAttrName() { return "strides"; }
VectorType getSourceVectorType() {
- return source()->getType().cast<VectorType>();
+ return source().getType().cast<VectorType>();
}
VectorType getDestVectorType() {
- return dest()->getType().cast<VectorType>();
+ return dest().getType().cast<VectorType>();
}
}];
}
}];
let extraClassDeclaration = [{
VectorType getOperandVectorTypeLHS() {
- return lhs()->getType().cast<VectorType>();
+ return lhs().getType().cast<VectorType>();
}
VectorType getOperandVectorTypeRHS() {
- return rhs()->getType().cast<VectorType>();
+ return rhs().getType().cast<VectorType>();
}
VectorType getOperandVectorTypeACC() {
return (llvm::size(acc()) == 0) ? VectorType() :
- (*acc().begin())->getType().cast<VectorType>();
+ (*acc().begin()).getType().cast<VectorType>();
}
VectorType getVectorType() {
- return getResult()->getType().cast<VectorType>();
+ return getResult().getType().cast<VectorType>();
}
}];
}
let extraClassDeclaration = [{
VectorType getInputVectorType() {
- return vector()->getType().cast<VectorType>();
+ return vector().getType().cast<VectorType>();
}
VectorType getOutputVectorType() {
- return getResult()->getType().cast<VectorType>();
+ return getResult().getType().cast<VectorType>();
}
/// Returns as integer value the number of input shape operands.
static StringRef getOffsetsAttrName() { return "offsets"; }
static StringRef getSizesAttrName() { return "sizes"; }
static StringRef getStridesAttrName() { return "strides"; }
- VectorType getVectorType(){ return vector()->getType().cast<VectorType>(); }
+ VectorType getVectorType(){ return vector().getType().cast<VectorType>(); }
void getOffsets(SmallVectorImpl<int64_t> &results);
}];
let hasCanonicalizer = 1;
let extraClassDeclaration = [{
MemRefType getMemRefType() {
- return memref()->getType().cast<MemRefType>();
+ return memref().getType().cast<MemRefType>();
}
VectorType getVectorType() {
- return vector()->getType().cast<VectorType>();
+ return vector().getType().cast<VectorType>();
}
}];
}
let extraClassDeclaration = [{
VectorType getVectorType() {
- return vector()->getType().cast<VectorType>();
+ return vector().getType().cast<VectorType>();
}
MemRefType getMemRefType() {
- return memref()->getType().cast<MemRefType>();
+ return memref().getType().cast<MemRefType>();
}
}];
}
let extraClassDeclaration = [{
MemRefType getMemRefType() {
- return memref()->getType().cast<MemRefType>();
+ return memref().getType().cast<MemRefType>();
}
MemRefType getResultMemRefType() {
- return getResult()->getType().cast<MemRefType>();
+ return getResult().getType().cast<MemRefType>();
}
}];
}
let extraClassDeclaration = [{
TupleType getResultTupleType() {
- return getResult()->getType().cast<TupleType>();
+ return getResult().getType().cast<TupleType>();
}
}];
}
let extraClassDeclaration = [{
VectorType getResultVectorType() {
- return getResult()->getType().cast<VectorType>();
+ return getResult().getType().cast<VectorType>();
}
int64_t getIndex() {
return getAttrOfType<IntegerAttr>("index").getValue().getSExtValue();
let verifier = ?;
let extraClassDeclaration = [{
Type getPrintType() {
- return source()->getType();
+ return source().getType();
}
}];
}
include "mlir/IR/OpBase.td"
class HasShape<list<int> shape> :
- CPred<"$0->getType().cast<ShapedType>().hasStaticShape({" #
+ CPred<"$0.getType().cast<ShapedType>().hasStaticShape({" #
StrJoinInt<shape>.result # "})">;
class UnrollVectorOp<list<int> factors> : NativeCodeCall<
- "unrollSingleResultOpMatchingType($_builder, $0->getDefiningOp(), " #
+ "unrollSingleResultOpMatchingType($_builder, $0.getDefiningOp(), " #
"{" # StrJoinInt<factors>.result # "})">;
#endif // VECTOR_TRANSFORM_PATTERNS
/// Value. An eager Value represents both the declaration and the definition
/// (in the PL sense) of a placeholder for an mlir::Value that has already
/// been constructed in the past and that is captured "now" in the program.
- explicit ValueHandle(Value v) : t(v->getType()), v(v) {}
+ explicit ValueHandle(Value v) : t(v.getType()), v(v) {}
/// Builds a ConstantIndexOp of value `cst`. The constant is created at the
/// current insertion point.
Operation *getOperation() const {
if (!v)
return nullptr;
- return v->getDefiningOp();
+ return v.getDefiningOp();
}
protected:
: ValueHandle(ScopedContext::getBuilder().getIndexType()) {}
explicit IndexHandle(index_t v) : ValueHandle(v) {}
explicit IndexHandle(Value v) : ValueHandle(v) {
- assert(v->getType() == ScopedContext::getBuilder().getIndexType() &&
+ assert(v.getType() == ScopedContext::getBuilder().getIndexType() &&
"Expected index type");
}
explicit IndexHandle(ValueHandle v) : ValueHandle(v) {
Attribute attr;
if (!constant_op_binder<Attribute>(&attr).match(op))
return false;
- auto type = op->getResult(0)->getType();
+ auto type = op->getResult(0).getType();
if (type.isIntOrIndex()) {
return attr_value_binder<IntegerAttr>(bind_value).match(attr);
MatcherClass, Operation *>::value,
bool>
matchOperandOrValueAtIndex(Operation *op, unsigned idx, MatcherClass &matcher) {
- if (auto defOp = op->getOperand(idx)->getDefiningOp())
+ if (auto defOp = op->getOperand(idx).getDefiningOp())
return matcher.match(defOp);
return false;
}
template <typename Pattern>
inline bool matchPattern(Value value, const Pattern &pattern) {
// TODO: handle other cases
- if (auto *op = value->getDefiningOp())
+ if (auto *op = value.getDefiningOp())
return const_cast<Pattern &>(pattern).match(op);
return false;
}
// constraints on each type definition reads naturally and we want to attach
// type constraints directly to an operand/result, $_self will be replaced
// by the operand/result's type. E.g., for `F32` in `F32:$operand`, its
-// `$_self` will be expanded as `getOperand(...)->getType()`.
+// `$_self` will be expanded as `getOperand(...).getType()`.
class CPred<code pred> : Pred {
code predExpr = "(" # pred # ")";
}
//===----------------------------------------------------------------------===//
def HasNoUseOf: Constraint<
- CPred<"$_self->use_begin() == $_self->use_end()">, "has no use">;
+ CPred<"$_self.use_empty()">, "has no use">;
//===----------------------------------------------------------------------===//
// Common op type constraints
// Type Constraint operand `idx`'s Element type is `type`.
class TCopVTEtIs<int idx, Type type> : And<[
CPred<"$_op.getNumOperands() > " # idx>,
- SubstLeaves<"$_self", "$_op.getOperand(" # idx # ")->getType()",
+ SubstLeaves<"$_self", "$_op.getOperand(" # idx # ").getType()",
IsShapedTypePred>,
SubstLeaves<"$_self", "getElementTypeOrSelf($_op.getOperand(" # idx # "))",
type.predicate>]>;
// type.
class TCopVTEtIsSameAs<int i, int j> : And<[
CPred<"$_op.getNumOperands() > std::max(" # i # "u," # j # "u)">,
- SubstLeaves<"$_self", "$_op.getOperand(" # i # ")->getType()",
+ SubstLeaves<"$_self", "$_op.getOperand(" # i # ").getType()",
IsShapedTypePred>,
- SubstLeaves<"$_self", "$_op.getOperand(" # j # ")->getType()",
+ SubstLeaves<"$_self", "$_op.getOperand(" # j # ").getType()",
IsShapedTypePred>,
CPred<"mlir::getElementTypeOrSelf($_op.getOperand(" # i # ")) == "
"mlir::getElementTypeOrSelf($_op.getOperand(" # j # "))">]>;
class TCOpResIsShapedTypePred<int i, int j> : And<[
CPred<"$_op.getNumResults() > " # i>,
CPred<"$_op.getNumOperands() > " # j>,
- SubstLeaves<"$_self", "$_op.getResult(" # i # ")->getType()",
+ SubstLeaves<"$_self", "$_op.getResult(" # i # ").getType()",
IsShapedTypePred>,
- SubstLeaves<"$_self", "$_op.getOperand(" # j # ")->getType()",
+ SubstLeaves<"$_self", "$_op.getOperand(" # j # ").getType()",
IsShapedTypePred>]>;
// Predicate to verify that the i'th result and the j'th operand have the same
// type.
class TCresIsSameAsOpBase<int i, int j> :
- CPred<"$_op.getResult(" # i # ")->getType() == "
- "$_op.getOperand(" # j # ")->getType()">;
+ CPred<"$_op.getResult(" # i # ").getType() == "
+ "$_op.getOperand(" # j # ").getType()">;
// Basic Predicate to verify that the i'th result and the j'th operand have the
// same elemental type.
class TCOpIsBroadcastableToRes<int opId, int resId> : And<[
TCOpResIsShapedTypePred<opId, resId>,
CPred<"OpTrait::util::getBroadcastedType("
- "$_op.getOperand(" # opId # ")->getType(), "
- "$_op.getResult(" # resId # ")->getType())">]>;
+ "$_op.getOperand(" # opId # ").getType(), "
+ "$_op.getResult(" # resId # ").getType())">]>;
// Predicate to verify that all the operands at the given `indices`
// have the same element type.
}
/// Return the type of the `i`-th result.
- Type getType(unsigned i) { return getResult(i)->getType(); }
+ Type getType(unsigned i) { return getResult(i).getType(); }
/// Result iterator access.
result_iterator result_begin() {
class OneResult : public TraitBase<ConcreteType, OneResult> {
public:
Value getResult() { return this->getOperation()->getResult(0); }
- Type getType() { return getResult()->getType(); }
+ Type getType() { return getResult().getType(); }
/// Replace all uses of 'this' value with the new value, updating anything in
/// the IR that uses 'this' to use the other value instead. When this returns
/// there are zero uses of 'this'.
void replaceAllUsesWith(Value newValue) {
- getResult()->replaceAllUsesWith(newValue);
+ getResult().replaceAllUsesWith(newValue);
}
/// Replace all uses of 'this' value with the result of 'op'.
os << "(";
interleaveComma(op->getNonSuccessorOperands(), os, [&](Value operand) {
if (operand)
- printType(operand->getType());
+ printType(operand.getType());
else
os << "<<NULL>";
});
os << ") -> ";
if (op->getNumResults() == 1 &&
- !op->getResult(0)->getType().isa<FunctionType>()) {
- printType(op->getResult(0)->getType());
+ !op->getResult(0).getType().isa<FunctionType>()) {
+ printType(op->getResult(0).getType());
} else {
os << '(';
interleaveComma(op->getResultTypes(), os);
auto valueIt = values.begin();
for (unsigned i = 0, e = getNumResults(); i != e; ++i)
- getResult(i)->replaceAllUsesWith(*(valueIt++));
+ getResult(i).replaceAllUsesWith(*(valueIt++));
}
/// Replace all uses of results of this operation with results of 'op'.
void replaceAllUsesWith(Operation *op) {
assert(getNumResults() == op->getNumResults());
for (unsigned i = 0, e = getNumResults(); i != e; ++i)
- getResult(i)->replaceAllUsesWith(op->getResult(i));
+ getResult(i).replaceAllUsesWith(op->getResult(i));
}
/// Destroys this operation and its subclass data.
return U(ownerAndKind);
}
- /// Temporary methods to enable transition of Value to being used as a
- /// value-type.
- /// TODO(riverriddle) Remove these when all usages have been removed.
- Value operator*() const { return *this; }
- Value *operator->() const { return const_cast<Value *>(this); }
-
operator bool() const { return ownerAndKind.getPointer(); }
bool operator==(const Value &other) const {
return ownerAndKind == other.ownerAndKind;
/// If this value is the result of an operation, use it as a location,
/// otherwise return an unknown location.
- Location getLoc();
+ Location getLoc() const;
/// Return the Region in which this Value is defined.
Region *getParentRegion();
public:
using Value::Value;
- /// Temporary methods to enable transition of Value to being used as a
- /// value-type.
- /// TODO(riverriddle) Remove this when all usages have been removed.
- BlockArgument *operator->() { return this; }
-
static bool classof(Value value) {
return value.getKind() == Kind::BlockArgument;
}
public:
using Value::Value;
- /// Temporary methods to enable transition of Value to being used as a
- /// value-type.
- /// TODO(riverriddle) Remove these when all usages have been removed.
- OpResult operator*() { return *this; }
- OpResult *operator->() { return this; }
-
static bool classof(Value value) {
return value.getKind() != Kind::BlockArgument;
}
return n->getKind() == Kind::Anchor || n->getKind() == Kind::ResultAnchor;
}
- Operation *getOp() const final { return resultValue->getDefiningOp(); }
+ Operation *getOp() const final { return resultValue.getDefiningOp(); }
Value getValue() const final { return resultValue; }
void printLabel(raw_ostream &os) const override;
template <typename Range>
bool areValuesDefinedAbove(Range values, Region &limit) {
for (Value v : values)
- if (!v->getParentRegion()->isProperAncestor(&limit))
+ if (!v.getParentRegion()->isProperAncestor(&limit))
return false;
return true;
}
while (!worklist.empty()) {
State &state = worklist.back();
- auto *opInst = state.value->getDefiningOp();
+ auto *opInst = state.value.getDefiningOp();
// Note: getDefiningOp will return nullptr if the operand is not an
// Operation (i.e. block argument), which is a terminator for the search.
if (!isa_and_nonnull<AffineApplyOp>(opInst)) {
auto symbol = operands[i];
assert(isValidSymbol(symbol));
// Check if the symbol is a constant.
- if (auto cOp = dyn_cast_or_null<ConstantIndexOp>(symbol->getDefiningOp()))
+ if (auto cOp = dyn_cast_or_null<ConstantIndexOp>(symbol.getDefiningOp()))
dependenceDomain->setIdToConstant(valuePosMap.getSymPos(symbol),
cOp.getValue());
}
unsigned d = offset;
for (auto aDimValue : aDimValues) {
unsigned loc;
- if (B->findId(*aDimValue, &loc)) {
+ if (B->findId(aDimValue, &loc)) {
assert(loc >= offset && "A's dim appears in B's aligned range");
assert(loc < B->getNumDimIds() &&
"A's dim appears in B's non-dim position");
unsigned s = B->getNumDimIds();
for (auto aSymValue : aSymValues) {
unsigned loc;
- if (B->findId(*aSymValue, &loc)) {
+ if (B->findId(aSymValue, &loc)) {
assert(loc >= B->getNumDimIds() && loc < B->getNumDimAndSymbolIds() &&
"A's symbol appears in B's non-symbol position");
swapId(B, s, loc);
// Dims and symbols.
for (unsigned i = 0, e = vMap->getNumOperands(); i < e; i++) {
unsigned loc;
- bool ret = findId(*vMap->getOperand(i), &loc);
+ bool ret = findId(vMap->getOperand(i), &loc);
assert(ret && "value map's id can't be found");
(void)ret;
// Negate 'eq[r]' since the newly added dimension will be set to this one.
}
// Turn each symbol in 'loopIVs' into a dim identifier.
for (auto iv : loopIVs) {
- turnSymbolIntoDim(this, *iv);
+ turnSymbolIntoDim(this, iv);
}
}
void FlatAffineConstraints::addInductionVarOrTerminalSymbol(Value id) {
- if (containsId(*id))
+ if (containsId(id))
return;
// Caller is expected to fully compose map/operands if necessary.
// Add top level symbol.
addSymbolId(getNumSymbolIds(), id);
// Check if the symbol is a constant.
- if (auto constOp = dyn_cast_or_null<ConstantIndexOp>(id->getDefiningOp()))
- setIdToConstant(*id, constOp.getValue());
+ if (auto constOp = dyn_cast_or_null<ConstantIndexOp>(id.getDefiningOp()))
+ setIdToConstant(id, constOp.getValue());
}
LogicalResult FlatAffineConstraints::addAffineForOpDomain(AffineForOp forOp) {
unsigned pos;
// Pre-condition for this method.
- if (!findId(*forOp.getInductionVar(), &pos)) {
+ if (!findId(forOp.getInductionVar(), &pos)) {
assert(false && "Value not found");
return failure();
}
localVarCst.setIdValues(0, localVarCst.getNumDimAndSymbolIds(), operands);
for (auto operand : operands) {
unsigned pos;
- if (findId(*operand, &pos)) {
+ if (findId(operand, &pos)) {
if (pos >= getNumDimIds() && pos < getNumDimAndSymbolIds()) {
// If the local var cst has this as a dim, turn it into its symbol.
- turnDimIntoSymbol(&localVarCst, *operand);
+ turnDimIntoSymbol(&localVarCst, operand);
} else if (pos < getNumDimIds()) {
// Or vice versa.
- turnSymbolIntoDim(&localVarCst, *operand);
+ turnSymbolIntoDim(&localVarCst, operand);
}
}
}
unsigned numOperands = operands.size();
for (auto operand : operands) {
unsigned pos;
- if (!findId(*operand, &pos))
+ if (!findId(operand, &pos))
assert(0 && "expected to be found");
positions.push_back(pos);
}
for (unsigned i = 0, e = lbMaps.size(); i < e; ++i) {
unsigned pos;
- if (!findId(*values[i], &pos))
+ if (!findId(values[i], &pos))
continue;
AffineMap lbMap = lbMaps[i];
void FlatAffineConstraints::projectOut(Value id) {
unsigned pos;
- bool ret = findId(*id, &pos);
+ bool ret = findId(id, &pos);
assert(ret);
(void)ret;
FourierMotzkinEliminate(pos);
callee = SymbolTable::lookupNearestSymbolFrom(from,
symbolRef.getRootReference());
else
- callee = callable.get<Value>()->getDefiningOp();
+ callee = callable.get<Value>().getDefiningOp();
// If the callee is non-null and is a valid callable object, try to get the
// called region from it.
/// Return true if value A properly dominates operation B.
bool DominanceInfo::properlyDominates(Value a, Operation *b) {
- if (auto *aOp = a->getDefiningOp()) {
+ if (auto *aOp = a.getDefiningOp()) {
// The values defined by an operation do *not* dominate any nested
// operations.
if (aOp->getParentRegion() != b->getParentRegion() && aOp->isAncestor(b))
// block arguments properly dominate all operations in their own block, so
// we use a dominates check here, not a properlyDominates check.
- return dominates(a.cast<BlockArgument>()->getOwner(), b->getBlock());
+ return dominates(a.cast<BlockArgument>().getOwner(), b->getBlock());
}
DominanceInfoNode *DominanceInfo::getNode(Block *a) {
// properties of the program, the uses must occur after
// the definition. Therefore, we do not have to check
// additional conditions to detect an escaping value.
- for (OpOperand &use : result->getUses())
+ for (OpOperand &use : result.getUses())
if (use.getOwner()->getBlock() != block) {
outValues.insert(result);
break;
// Start with the defining block
Block *currentBlock;
- if (Operation *defOp = value->getDefiningOp())
+ if (Operation *defOp = value.getDefiningOp())
currentBlock = defOp->getBlock();
else
- currentBlock = value.cast<BlockArgument>()->getOwner();
+ currentBlock = value.cast<BlockArgument>().getOwner();
toProcess.push_back(currentBlock);
visited.insert(currentBlock);
// Start with all associated blocks
- for (OpOperand &use : value->getUses()) {
+ for (OpOperand &use : value.getUses()) {
Block *useBlock = use.getOwner()->getBlock();
if (visited.insert(useBlock).second)
toProcess.push_back(useBlock);
// Local printing helpers
auto printValueRef = [&](Value value) {
- if (Operation *defOp = value->getDefiningOp())
+ if (Operation *defOp = value.getDefiningOp())
os << "val_" << defOp->getName();
else {
auto blockArg = value.cast<BlockArgument>();
- os << "arg" << blockArg->getArgNumber() << "@"
- << blockIds[blockArg->getOwner()];
+ os << "arg" << blockArg.getArgNumber() << "@"
+ << blockIds[blockArg.getOwner()];
}
os << " ";
};
/// Gets the start operation for the given value
/// (must be referenced in this block).
Operation *LivenessBlockInfo::getStartOperation(Value value) const {
- Operation *definingOp = value->getDefiningOp();
+ Operation *definingOp = value.getDefiningOp();
// The given value is either live-in or is defined
// in the scope of this block.
if (isLiveIn(value) || !definingOp)
// Resolve the last operation (must exist by definition).
Operation *endOperation = startOperation;
- for (OpOperand &use : value->getUses()) {
+ for (OpOperand &use : value.getUses()) {
Operation *useOperation = use.getOwner();
// Check whether the use is in our block and after
// the current end operation.
/// conservative.
static bool isAccessIndexInvariant(Value iv, Value index) {
assert(isForInductionVar(iv) && "iv must be a AffineForOp");
- assert(index->getType().isa<IndexType>() && "index must be of IndexType");
+ assert(index.getType().isa<IndexType>() && "index must be of IndexType");
SmallVector<Operation *, 4> affineApplyOps;
getReachableAffineApplyOps({index}, affineApplyOps);
// Validate the results of this operation if it were to be shifted.
for (unsigned i = 0, e = op.getNumResults(); i < e; ++i) {
Value result = op.getResult(i);
- for (auto *user : result->getUsers()) {
+ for (auto *user : result.getUsers()) {
// If an ancestor operation doesn't lie in the block of forOp,
// there is no shift to check.
if (auto *ancOp = forBody->findAncestorOpInBlock(*user)) {
}
if (auto forOp = dyn_cast<AffineForOp>(op)) {
- for (auto *ownerInst : forOp.getInductionVar()->getUsers())
+ for (auto *ownerInst : forOp.getInductionVar().getUsers())
if (forwardSlice->count(ownerInst) == 0)
getForwardSliceImpl(ownerInst, forwardSlice, filter);
} else if (auto forOp = dyn_cast<loop::ForOp>(op)) {
- for (auto *ownerInst : forOp.getInductionVar()->getUsers())
+ for (auto *ownerInst : forOp.getInductionVar().getUsers())
if (forwardSlice->count(ownerInst) == 0)
getForwardSliceImpl(ownerInst, forwardSlice, filter);
} else {
assert(op->getNumRegions() == 0 && "unexpected generic op with regions");
assert(op->getNumResults() <= 1 && "unexpected multiple results");
if (op->getNumResults() > 0) {
- for (auto *ownerInst : op->getResult(0)->getUsers())
+ for (auto *ownerInst : op->getResult(0).getUsers())
if (forwardSlice->count(ownerInst) == 0)
getForwardSliceImpl(ownerInst, forwardSlice, filter);
}
auto *loopOp = loopIv.getOperation();
if (backwardSlice->count(loopOp) == 0)
getBackwardSliceImpl(loopOp, backwardSlice, filter);
- } else if (blockArg->getOwner() !=
+ } else if (blockArg.getOwner() !=
&op->getParentOfType<FuncOp>().getBody().front()) {
op->emitError("unsupported CF for operand ") << en.index();
llvm_unreachable("Unsupported control flow");
}
continue;
}
- auto *op = operand->getDefiningOp();
+ auto *op = operand.getDefiningOp();
if (backwardSlice->count(op) == 0) {
getBackwardSliceImpl(op, backwardSlice, filter);
}
static void DFSPostorder(Operation *current, DFSState *state) {
assert(current->getNumResults() <= 1 && "NYI: multi-result");
if (current->getNumResults() > 0) {
- for (auto &u : current->getResult(0)->getUses()) {
+ for (auto &u : current->getResult(0).getUses()) {
auto *op = u.getOwner();
DFSPostorder(op, state);
}
// Add loop bound constraints for values which are loop IVs and equality
// constraints for symbols which are constants.
for (const auto &value : values) {
- assert(cst->containsId(*value) && "value expected to be present");
+ assert(cst->containsId(value) && "value expected to be present");
if (isValidSymbol(value)) {
// Check if the symbol is a constant.
- if (auto cOp = dyn_cast_or_null<ConstantIndexOp>(value->getDefiningOp()))
- cst->setIdToConstant(*value, cOp.getValue());
+ if (auto cOp = dyn_cast_or_null<ConstantIndexOp>(value.getDefiningOp()))
+ cst->setIdToConstant(value, cOp.getValue());
} else if (auto loop = getForInductionVarOwner(value)) {
if (failed(cst->addAffineForOpDomain(loop)))
return failure();
}
unsigned MemRefRegion::getRank() const {
- return memref->getType().cast<MemRefType>().getRank();
+ return memref.getType().cast<MemRefType>().getRank();
}
Optional<int64_t> MemRefRegion::getConstantBoundingSizeAndShape(
SmallVectorImpl<int64_t> *shape, std::vector<SmallVector<int64_t, 4>> *lbs,
SmallVectorImpl<int64_t> *lbDivisors) const {
- auto memRefType = memref->getType().cast<MemRefType>();
+ auto memRefType = memref.getType().cast<MemRefType>();
unsigned rank = memRefType.getRank();
if (shape)
shape->reserve(rank);
auto symbol = operand;
assert(isValidSymbol(symbol));
// Check if the symbol is a constant.
- if (auto *op = symbol->getDefiningOp()) {
+ if (auto *op = symbol.getDefiningOp()) {
if (auto constOp = dyn_cast<ConstantIndexOp>(op)) {
- cst.setIdToConstant(*symbol, constOp.getValue());
+ cst.setIdToConstant(symbol, constOp.getValue());
}
}
}
// to guard against potential over-approximation from projection.
// TODO(andydavis) Support dynamic memref dimensions.
if (addMemRefDimBounds) {
- auto memRefType = memref->getType().cast<MemRefType>();
+ auto memRefType = memref.getType().cast<MemRefType>();
for (unsigned r = 0; r < rank; r++) {
cst.addConstantLowerBound(r, 0);
int64_t dimSize = memRefType.getDimSize(r);
// Returns the size of the region.
Optional<int64_t> MemRefRegion::getRegionSize() {
- auto memRefType = memref->getType().cast<MemRefType>();
+ auto memRefType = memref.getType().cast<MemRefType>();
auto layoutMaps = memRefType.getAffineMaps();
if (layoutMaps.size() > 1 ||
}
unsigned MemRefAccess::getRank() const {
- return memref->getType().cast<MemRefType>().getRank();
+ return memref.getType().cast<MemRefType>().getRank();
}
bool MemRefAccess::isStore() const { return isa<AffineStoreOp>(opInst); }
}
return false;
} else if (op.getNumResults() == 1) {
- if (auto v = op.getResult(0)->getType().dyn_cast<VectorType>()) {
+ if (auto v = op.getResult(0).getType().dyn_cast<VectorType>()) {
superVectorType = v;
} else {
// Not a vector type.
LogicalResult OperationVerifier::verifyBlock(Block &block) {
for (auto arg : block.getArguments())
- if (arg->getOwner() != &block)
+ if (arg.getOwner() != &block)
return emitError(block, "block argument not owned by block");
// Verify that this block has a terminator.
auto diag = op.emitError("operand #")
<< operandNo << " does not dominate this use";
- if (auto *useOp = operand->getDefiningOp())
+ if (auto *useOp = operand.getDefiningOp())
diag.attachNote(useOp->getLoc()) << "operand defined here";
return failure();
}
std::is_base_of<OpTrait::OneResult<SourceOp>, SourceOp>::value,
"expected single result op");
- LLVMType resultType = lowering.convertType(op->getResult(0)->getType())
+ LLVMType resultType = lowering.convertType(op->getResult(0).getType())
.template cast<LLVM::LLVMType>();
LLVMType funcType = getFunctionType(resultType, operands);
StringRef funcName = getFunctionName(resultType);
using LLVM::LLVMType;
SmallVector<LLVMType, 1> operandTypes;
for (Value operand : operands) {
- operandTypes.push_back(operand->getType().cast<LLVMType>());
+ operandTypes.push_back(operand.getType().cast<LLVMType>());
}
return LLVMType::getFunctionTy(resultType, operandTypes,
/*isVarArg=*/false);
arraySize, /*alignment=*/0);
for (unsigned idx = 0; idx < numKernelOperands; ++idx) {
auto operand = launchOp.getKernelOperand(idx);
- auto llvmType = operand->getType().cast<LLVM::LLVMType>();
+ auto llvmType = operand.getType().cast<LLVM::LLVMType>();
Value memLocation = builder.create<LLVM::AllocaOp>(
loc, llvmType.getPointerTo(), one, /*alignment=*/1);
builder.create<LLVM::StoreOp>(loc, operand, memLocation);
Value operand = operands.front();
// TODO(csigg): Generalize to other types of accumulation.
- assert(op->getOperand(0)->getType().isIntOrFloat());
+ assert(op->getOperand(0).getType().isIntOrFloat());
// Create the reduction using an accumulator factory.
AccumulatorFactory factory =
return getFactory(allReduce.body());
}
if (allReduce.op()) {
- auto type = operand->getType().cast<LLVM::LLVMType>();
+ auto type = operand.getType().cast<LLVM::LLVMType>();
return getFactory(*allReduce.op(), type.getUnderlyingType());
}
return AccumulatorFactory();
// Return accumulator result.
rewriter.setInsertionPointToStart(split);
- return split->addArgument(lhs->getType());
+ return split->addArgument(lhs.getType());
});
}
template <typename T> AccumulatorFactory getFactory() const {
return [](Location loc, Value lhs, Value rhs,
ConversionPatternRewriter &rewriter) {
- return rewriter.create<T>(loc, lhs->getType(), lhs, rhs);
+ return rewriter.create<T>(loc, lhs.getType(), lhs, rhs);
};
}
Value createBlockReduce(Location loc, Value operand,
AccumulatorFactory &accumFactory,
ConversionPatternRewriter &rewriter) const {
- auto type = operand->getType().cast<LLVM::LLVMType>();
+ auto type = operand.getType().cast<LLVM::LLVMType>();
// Create shared memory array to store the warp reduction.
- auto module = operand->getDefiningOp()->getParentOfType<ModuleOp>();
+ auto module = operand.getDefiningOp()->getParentOfType<ModuleOp>();
assert(module && "op must belong to a module");
Value sharedMemPtr =
createSharedMemoryArray(loc, module, type, kWarpSize, rewriter);
assert(thenOperands.size() == elseOperands.size());
rewriter.setInsertionPointToStart(continueBlock);
for (auto operand : thenOperands)
- continueBlock->addArgument(operand->getType());
+ continueBlock->addArgument(operand.getType());
}
/// Shortcut for createIf with empty else block and no block operands.
loc, int32Type, rewriter.getI32IntegerAttr(kWarpSize));
Value isPartialWarp = rewriter.create<LLVM::ICmpOp>(
loc, LLVM::ICmpPredicate::slt, activeWidth, warpSize);
- auto type = operand->getType().cast<LLVM::LLVMType>();
+ auto type = operand.getType().cast<LLVM::LLVMType>();
createIf(
loc, rewriter, isPartialWarp,
/// Returns value divided by the warp size (i.e. 32).
Value getDivideByWarpSize(Value value,
ConversionPatternRewriter &rewriter) const {
- auto loc = value->getLoc();
+ auto loc = value.getLoc();
auto warpSize = rewriter.create<LLVM::ConstantOp>(
loc, int32Type, rewriter.getI32IntegerAttr(kWarpSize));
return rewriter.create<LLVM::SDivOp>(loc, int32Type, value, warpSize);
gpu::ShuffleOpOperandAdaptor adaptor(operands);
auto dialect = lowering.getDialect();
- auto valueTy = adaptor.value()->getType().cast<LLVM::LLVMType>();
+ auto valueTy = adaptor.value().getType().cast<LLVM::LLVMType>();
auto int32Type = LLVM::LLVMType::getInt32Ty(dialect);
auto predTy = LLVM::LLVMType::getInt1Ty(dialect);
auto resultTy = LLVM::LLVMType::getStructTy(dialect, {valueTy, predTy});
for (auto en : llvm::enumerate(gpuFuncOp.getWorkgroupAttributions())) {
Value attribution = en.value();
- auto type = attribution->getType().dyn_cast<MemRefType>();
+ auto type = attribution.getType().dyn_cast<MemRefType>();
assert(type && type.hasStaticShape() && "unexpected type in attribution");
uint64_t numElements = type.getNumElements();
// otherwise necessary given that memref sizes are fixed, but we can try
// and canonicalize that away later.
Value attribution = gpuFuncOp.getWorkgroupAttributions()[en.index()];
- auto type = attribution->getType().cast<MemRefType>();
+ auto type = attribution.getType().cast<MemRefType>();
auto descr = MemRefDescriptor::fromStaticShape(rewriter, loc, lowering,
type, memory);
signatureConversion.remapInput(numProperArguments + en.index(), descr);
auto int64Ty = LLVM::LLVMType::getInt64Ty(lowering.getDialect());
for (auto en : llvm::enumerate(gpuFuncOp.getPrivateAttributions())) {
Value attribution = en.value();
- auto type = attribution->getType().cast<MemRefType>();
+ auto type = attribution.getType().cast<MemRefType>();
assert(type && type.hasStaticShape() &&
"unexpected type in attribution");
// Create the new induction variable to use.
BlockArgument newIndVar =
- header->addArgument(forOperands.lowerBound()->getType());
+ header->addArgument(forOperands.lowerBound().getType());
Block *body = forOp.getBody();
// Apply signature conversion to the body of the forOp. It has a single block,
// Add the step to the induction variable and branch to the header.
Value updatedIndVar = rewriter.create<spirv::IAddOp>(
- loc, newIndVar->getType(), newIndVar, forOperands.step());
+ loc, newIndVar.getType(), newIndVar, forOperands.step());
rewriter.create<spirv::BranchOp>(loc, header, updatedIndVar);
rewriter.eraseOp(forOp);
ConversionPatternRewriter &rewriter) const override {
auto rangeOp = cast<RangeOp>(op);
auto rangeDescriptorTy =
- convertLinalgType(rangeOp.getResult()->getType(), lowering);
+ convertLinalgType(rangeOp.getResult().getType(), lowering);
edsc::ScopedContext context(rewriter, op->getLoc());
for (int i = 0, e = memRefType.getRank(); i < e; ++i) {
Value indexing = adaptor.indexings()[i];
Value min = indexing;
- if (sliceOp.indexing(i)->getType().isa<RangeType>())
+ if (sliceOp.indexing(i).getType().isa<RangeType>())
min = extractvalue(int64Ty, indexing, pos(0));
baseOffset = add(baseOffset, mul(min, strides[i]));
}
int numNewDims = 0;
for (auto en : llvm::enumerate(sliceOp.indexings())) {
Value indexing = en.value();
- if (indexing->getType().isa<RangeType>()) {
+ if (indexing.getType().isa<RangeType>()) {
int rank = en.index();
Value rangeDescriptor = adaptor.indexings()[rank];
Value min = extractvalue(int64Ty, rangeDescriptor, pos(0));
// Return true if the value is obviously a constant "one".
static bool isConstantOne(Value value) {
- if (auto def = dyn_cast_or_null<ConstantIndexOp>(value->getDefiningOp()))
+ if (auto def = dyn_cast_or_null<ConstantIndexOp>(value.getDefiningOp()))
return def.getValue() == 1;
return false;
}
Value ivReplacement =
builder.create<AddIOp>(rootForOp.getLoc(), *lbArgumentIt, id);
- en.value()->replaceAllUsesWith(ivReplacement);
+ en.value().replaceAllUsesWith(ivReplacement);
replaceAllUsesInRegionWith(steps[en.index()], *stepArgumentIt,
launchOp.body());
std::advance(lbArgumentIt, 1);
/*============================================================================*/
StructBuilder::StructBuilder(Value v) : value(v) {
assert(value != nullptr && "value cannot be null");
- structType = value->getType().cast<LLVM::LLVMType>();
+ structType = value.getType().cast<LLVM::LLVMType>();
}
Value StructBuilder::extractPtr(OpBuilder &builder, Location loc,
MemRefDescriptor::MemRefDescriptor(Value descriptor)
: StructBuilder(descriptor) {
assert(value != nullptr && "value cannot be null");
- indexType = value->getType().cast<LLVM::LLVMType>().getStructElementType(
+ indexType = value.getType().cast<LLVM::LLVMType>().getStructElementType(
kOffsetPosInMemRefDescriptor);
}
}
LLVM::LLVMType MemRefDescriptor::getElementType() {
- return value->getType().cast<LLVM::LLVMType>().getStructElementType(
+ return value.getType().cast<LLVM::LLVMType>().getStructElementType(
kAlignedPtrPosInMemRefDescriptor);
}
SmallVector<Value, 4> results;
results.reserve(numResults);
for (unsigned i = 0; i < numResults; ++i) {
- auto type = this->lowering.convertType(op->getResult(i)->getType());
+ auto type = this->lowering.convertType(op->getResult(i).getType());
results.push_back(rewriter.create<LLVM::ExtractValueOp>(
op->getLoc(), type, newOp.getOperation()->getResult(0),
rewriter.getI64ArrayAttr(i)));
// Cannot convert ops if their operands are not of LLVM type.
for (Value operand : operands) {
- if (!operand || !operand->getType().isa<LLVM::LLVMType>())
+ if (!operand || !operand.getType().isa<LLVM::LLVMType>())
return this->matchFailure();
}
auto loc = op->getLoc();
- auto llvmArrayTy = operands[0]->getType().cast<LLVM::LLVMType>();
+ auto llvmArrayTy = operands[0].getType().cast<LLVM::LLVMType>();
if (!llvmArrayTy.isArrayTy()) {
auto newOp = rewriter.create<TargetOp>(
- op->getLoc(), operands[0]->getType(), operands, op->getAttrs());
+ op->getLoc(), operands[0].getType(), operands, op->getAttrs());
rewriter.replaceOp(op, newOp.getResult());
return this->matchSuccess();
}
- auto vectorType = op->getResult(0)->getType().dyn_cast<VectorType>();
+ auto vectorType = op->getResult(0).getType().dyn_cast<VectorType>();
if (!vectorType)
return this->matchFailure();
auto vectorTypeInfo = extractNDVectorTypeInfo(vectorType, this->lowering);
Value subbed =
rewriter.create<LLVM::SubOp>(loc, alignmentValue, ptrModAlign);
Value offset = rewriter.create<LLVM::URemOp>(loc, subbed, alignmentValue);
- Value aligned = rewriter.create<LLVM::GEPOp>(loc, allocated->getType(),
+ Value aligned = rewriter.create<LLVM::GEPOp>(loc, allocated.getType(),
allocated, offset);
bitcastAligned = rewriter.create<LLVM::BitcastOp>(
loc, elementPtrType, ArrayRef<Value>(aligned));
SmallVector<Value, 4> results;
results.reserve(numResults);
for (unsigned i = 0; i < numResults; ++i) {
- auto type = this->lowering.convertType(op->getResult(i)->getType());
+ auto type = this->lowering.convertType(op->getResult(i).getType());
results.push_back(rewriter.create<LLVM::ExtractValueOp>(
op->getLoc(), type, newOp.getOperation()->getResult(0),
rewriter.getI64ArrayAttr(i)));
OperandAdaptor<TanhOp> transformed(operands);
LLVMTypeT operandType =
- transformed.operand()->getType().dyn_cast_or_null<LLVM::LLVMType>();
+ transformed.operand().getType().dyn_cast_or_null<LLVM::LLVMType>();
if (!operandType)
return matchFailure();
PatternMatchResult match(Operation *op) const override {
auto memRefCastOp = cast<MemRefCastOp>(op);
- Type srcType = memRefCastOp.getOperand()->getType();
+ Type srcType = memRefCastOp.getOperand().getType();
Type dstType = memRefCastOp.getType();
if (srcType.isa<MemRefType>() && dstType.isa<MemRefType>()) {
MemRefType sourceType =
- memRefCastOp.getOperand()->getType().cast<MemRefType>();
+ memRefCastOp.getOperand().getType().cast<MemRefType>();
MemRefType targetType = memRefCastOp.getType().cast<MemRefType>();
return (isSupportedMemRefType(targetType) &&
isSupportedMemRefType(sourceType))
auto memRefCastOp = cast<MemRefCastOp>(op);
OperandAdaptor<MemRefCastOp> transformed(operands);
- auto srcType = memRefCastOp.getOperand()->getType();
+ auto srcType = memRefCastOp.getOperand().getType();
auto dstType = memRefCastOp.getType();
auto targetStructType = lowering.convertType(memRefCastOp.getType());
auto loc = op->getLoc();
ConversionPatternRewriter &rewriter) const override {
auto dimOp = cast<DimOp>(op);
OperandAdaptor<DimOp> transformed(operands);
- MemRefType type = dimOp.getOperand()->getType().cast<MemRefType>();
+ MemRefType type = dimOp.getOperand().getType().cast<MemRefType>();
auto shape = type.getShape();
int64_t index = dimOp.getIndex();
auto indexCastOp = cast<IndexCastOp>(op);
auto targetType =
- this->lowering.convertType(indexCastOp.getResult()->getType())
+ this->lowering.convertType(indexCastOp.getResult().getType())
.cast<LLVM::LLVMType>();
- auto sourceType = transformed.in()->getType().cast<LLVM::LLVMType>();
+ auto sourceType = transformed.in().getType().cast<LLVM::LLVMType>();
unsigned targetBits = targetType.getUnderlyingType()->getIntegerBitWidth();
unsigned sourceBits = sourceType.getUnderlyingType()->getIntegerBitWidth();
CmpIOpOperandAdaptor transformed(operands);
rewriter.replaceOpWithNewOp<LLVM::ICmpOp>(
- op, lowering.convertType(cmpiOp.getResult()->getType()),
+ op, lowering.convertType(cmpiOp.getResult().getType()),
rewriter.getI64IntegerAttr(static_cast<int64_t>(
convertCmpPredicate<LLVM::ICmpPredicate>(cmpiOp.getPredicate()))),
transformed.lhs(), transformed.rhs());
CmpFOpOperandAdaptor transformed(operands);
rewriter.replaceOpWithNewOp<LLVM::FCmpOp>(
- op, lowering.convertType(cmpfOp.getResult()->getType()),
+ op, lowering.convertType(cmpfOp.getResult().getType()),
rewriter.getI64IntegerAttr(static_cast<int64_t>(
convertCmpPredicate<LLVM::FCmpPredicate>(cmpfOp.getPredicate()))),
transformed.lhs(), transformed.rhs());
1 + viewOp.getNumOffsets() + viewOp.getNumSizes()),
operands.end());
- auto sourceMemRefType = viewOp.source()->getType().cast<MemRefType>();
+ auto sourceMemRefType = viewOp.source().getType().cast<MemRefType>();
auto sourceElementTy =
lowering.convertType(sourceMemRefType.getElementType())
.dyn_cast_or_null<LLVM::LLVMType>();
auto indexType = IndexType::get(context);
// Alloca with proper alignment. We do not expect optimizations of this
// alloca op and so we omit allocating at the entry block.
- auto ptrType = operand->getType().cast<LLVM::LLVMType>().getPointerTo();
+ auto ptrType = operand.getType().cast<LLVM::LLVMType>().getPointerTo();
Value one = builder.create<LLVM::ConstantOp>(loc, int64Ty,
IntegerAttr::get(indexType, 1));
Value allocated =
for (auto it : llvm::zip(opOperands, operands)) {
auto operand = std::get<0>(it);
auto llvmOperand = std::get<1>(it);
- if (!operand->getType().isa<MemRefType>() &&
- !operand->getType().isa<UnrankedMemRefType>()) {
+ if (!operand.getType().isa<MemRefType>() &&
+ !operand.getType().isa<UnrankedMemRefType>()) {
promotedOperands.push_back(operand);
continue;
}
matchAndRewrite(StdOp operation, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const override {
auto resultType =
- this->typeConverter.convertType(operation.getResult()->getType());
+ this->typeConverter.convertType(operation.getResult().getType());
rewriter.template replaceOpWithNewOp<SPIRVOp>(
operation, resultType, operands, ArrayRef<NamedAttribute>());
return this->matchSuccess();
PatternMatchResult ConstantIndexOpConversion::matchAndRewrite(
ConstantOp constIndexOp, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const {
- if (!constIndexOp.getResult()->getType().isa<IndexType>()) {
+ if (!constIndexOp.getResult().getType().isa<IndexType>()) {
return matchFailure();
}
// The attribute has index type which is not directly supported in
return matchFailure();
}
auto spirvConstType =
- typeConverter.convertType(constIndexOp.getResult()->getType());
+ typeConverter.convertType(constIndexOp.getResult().getType());
auto spirvConstVal =
rewriter.getIntegerAttr(spirvConstType, constAttr.getInt());
rewriter.replaceOpWithNewOp<spirv::ConstantOp>(constIndexOp, spirvConstType,
switch (cmpFOp.getPredicate()) {
#define DISPATCH(cmpPredicate, spirvOp) \
case cmpPredicate: \
- rewriter.replaceOpWithNewOp<spirvOp>( \
- cmpFOp, cmpFOp.getResult()->getType(), cmpFOpOperands.lhs(), \
- cmpFOpOperands.rhs()); \
+ rewriter.replaceOpWithNewOp<spirvOp>(cmpFOp, cmpFOp.getResult().getType(), \
+ cmpFOpOperands.lhs(), \
+ cmpFOpOperands.rhs()); \
return matchSuccess();
// Ordered.
switch (cmpIOp.getPredicate()) {
#define DISPATCH(cmpPredicate, spirvOp) \
case cmpPredicate: \
- rewriter.replaceOpWithNewOp<spirvOp>( \
- cmpIOp, cmpIOp.getResult()->getType(), cmpIOpOperands.lhs(), \
- cmpIOpOperands.rhs()); \
+ rewriter.replaceOpWithNewOp<spirvOp>(cmpIOp, cmpIOp.getResult().getType(), \
+ cmpIOpOperands.lhs(), \
+ cmpIOpOperands.rhs()); \
return matchSuccess();
DISPATCH(CmpIPredicate::eq, spirv::IEqualOp);
ConversionPatternRewriter &rewriter) const {
LoadOpOperandAdaptor loadOperands(operands);
auto loadPtr = getElementPtr(rewriter, typeConverter, loadOp.getLoc(),
- loadOp.memref()->getType().cast<MemRefType>(),
+ loadOp.memref().getType().cast<MemRefType>(),
loadOperands.memref(), loadOperands.indices());
rewriter.replaceOpWithNewOp<spirv::LoadOp>(loadOp, loadPtr,
/*memory_access =*/nullptr,
StoreOpOperandAdaptor storeOperands(operands);
auto storePtr =
getElementPtr(rewriter, typeConverter, storeOp.getLoc(),
- storeOp.memref()->getType().cast<MemRefType>(),
+ storeOp.memref().getType().cast<MemRefType>(),
storeOperands.memref(), storeOperands.indices());
rewriter.replaceOpWithNewOp<spirv::StoreOp>(storeOp, storePtr,
storeOperands.value(),
PatternMatchResult
LoadOpOfSubViewFolder::matchAndRewrite(LoadOp loadOp,
PatternRewriter &rewriter) const {
- auto subViewOp =
- dyn_cast_or_null<SubViewOp>(loadOp.memref()->getDefiningOp());
+ auto subViewOp = dyn_cast_or_null<SubViewOp>(loadOp.memref().getDefiningOp());
if (!subViewOp) {
return matchFailure();
}
StoreOpOfSubViewFolder::matchAndRewrite(StoreOp storeOp,
PatternRewriter &rewriter) const {
auto subViewOp =
- dyn_cast_or_null<SubViewOp>(storeOp.memref()->getDefiningOp());
+ dyn_cast_or_null<SubViewOp>(storeOp.memref().getDefiningOp());
if (!subViewOp) {
return matchFailure();
}
auto adaptor = vector::ExtractOpOperandAdaptor(operands);
auto extractOp = cast<vector::ExtractOp>(op);
auto vectorType = extractOp.getVectorType();
- auto resultType = extractOp.getResult()->getType();
+ auto resultType = extractOp.getResult().getType();
auto llvmResultType = lowering.convertType(resultType);
auto positionArrayAttr = extractOp.position();
auto loc = op->getLoc();
auto adaptor = vector::OuterProductOpOperandAdaptor(operands);
auto *ctx = op->getContext();
- auto vLHS = adaptor.lhs()->getType().cast<LLVM::LLVMType>();
- auto vRHS = adaptor.rhs()->getType().cast<LLVM::LLVMType>();
+ auto vLHS = adaptor.lhs().getType().cast<LLVM::LLVMType>();
+ auto vRHS = adaptor.rhs().getType().cast<LLVM::LLVMType>();
auto rankLHS = vLHS.getUnderlyingType()->getVectorNumElements();
auto rankRHS = vRHS.getUnderlyingType()->getVectorNumElements();
auto llvmArrayOfVectType = lowering.convertType(
- cast<vector::OuterProductOp>(op).getResult()->getType());
+ cast<vector::OuterProductOp>(op).getResult().getType());
Value desc = rewriter.create<LLVM::UndefOp>(loc, llvmArrayOfVectType);
Value a = adaptor.lhs(), b = adaptor.rhs();
Value acc = adaptor.acc().empty() ? nullptr : adaptor.acc().front();
auto loc = op->getLoc();
vector::TypeCastOp castOp = cast<vector::TypeCastOp>(op);
MemRefType sourceMemRefType =
- castOp.getOperand()->getType().cast<MemRefType>();
+ castOp.getOperand().getType().cast<MemRefType>();
MemRefType targetMemRefType =
- castOp.getResult()->getType().cast<MemRefType>();
+ castOp.getResult().getType().cast<MemRefType>();
// Only static shape casts supported atm.
if (!sourceMemRefType.hasStaticShape() ||
return matchFailure();
auto llvmSourceDescriptorTy =
- operands[0]->getType().dyn_cast<LLVM::LLVMType>();
+ operands[0].getType().dyn_cast<LLVM::LLVMType>();
if (!llvmSourceDescriptorTy || !llvmSourceDescriptorTy.isStructTy())
return matchFailure();
MemRefDescriptor sourceMemRef(operands[0]);
/// symbol.
bool mlir::isTopLevelValue(Value value) {
if (auto arg = value.dyn_cast<BlockArgument>())
- return isFunctionRegion(arg->getOwner()->getParent());
- return isFunctionRegion(value->getDefiningOp()->getParentRegion());
+ return isFunctionRegion(arg.getOwner()->getParent());
+ return isFunctionRegion(value.getDefiningOp()->getParentRegion());
}
// Value can be used as a dimension id if it is valid as a symbol, or
// with dimension id arguments.
bool mlir::isValidDim(Value value) {
// The value must be an index type.
- if (!value->getType().isIndex())
+ if (!value.getType().isIndex())
return false;
- if (auto *op = value->getDefiningOp()) {
+ if (auto *op = value.getDefiningOp()) {
// Top level operation or constant operation is ok.
if (isFunctionRegion(op->getParentRegion()) || isa<ConstantOp>(op))
return true;
return false;
}
// This value has to be a block argument for a FuncOp or an affine.for.
- auto *parentOp = value.cast<BlockArgument>()->getOwner()->getParentOp();
+ auto *parentOp = value.cast<BlockArgument>().getOwner()->getParentOp();
return isa<FuncOp>(parentOp) || isa<AffineForOp>(parentOp);
}
// The dim op is also okay if its operand memref/tensor is a view/subview
// whose corresponding size is a valid symbol.
unsigned index = dimOp.getIndex();
- if (auto viewOp = dyn_cast<ViewOp>(dimOp.getOperand()->getDefiningOp()))
+ if (auto viewOp = dyn_cast<ViewOp>(dimOp.getOperand().getDefiningOp()))
return isMemRefSizeValidSymbol<ViewOp>(viewOp, index);
- if (auto subViewOp = dyn_cast<SubViewOp>(dimOp.getOperand()->getDefiningOp()))
+ if (auto subViewOp = dyn_cast<SubViewOp>(dimOp.getOperand().getDefiningOp()))
return isMemRefSizeValidSymbol<SubViewOp>(subViewOp, index);
- if (auto allocOp = dyn_cast<AllocOp>(dimOp.getOperand()->getDefiningOp()))
+ if (auto allocOp = dyn_cast<AllocOp>(dimOp.getOperand().getDefiningOp()))
return isMemRefSizeValidSymbol<AllocOp>(allocOp, index);
return false;
}
// constraints.
bool mlir::isValidSymbol(Value value) {
// The value must be an index type.
- if (!value->getType().isIndex())
+ if (!value.getType().isIndex())
return false;
- if (auto *op = value->getDefiningOp()) {
+ if (auto *op = value.getDefiningOp()) {
// Top level operation or constant operation is ok.
if (isFunctionRegion(op->getParentRegion()) || isa<ConstantOp>(op))
return true;
return emitOpError("operands must be of type 'index'");
}
- if (!getResult()->getType().isIndex())
+ if (!getResult().getType().isIndex())
return emitOpError("result must be of type 'index'");
// Verify that the map only produces one result.
if (inserted) {
reorderedDims.push_back(v);
}
- return getAffineDimExpr(iterPos->second, v->getContext())
+ return getAffineDimExpr(iterPos->second, v.getContext())
.cast<AffineDimExpr>();
}
indicesFromAffineApplyOp(ArrayRef<Value> operands) {
llvm::SetVector<unsigned> res;
for (auto en : llvm::enumerate(operands))
- if (isa_and_nonnull<AffineApplyOp>(en.value()->getDefiningOp()))
+ if (isa_and_nonnull<AffineApplyOp>(en.value().getDefiningOp()))
res.insert(en.index());
return res;
}
// 1. Only dispatch dims or symbols.
for (auto en : llvm::enumerate(operands)) {
auto t = en.value();
- assert(t->getType().isIndex());
+ assert(t.getType().isIndex());
bool isDim = (en.index() < map.getNumDims());
if (isDim) {
// a. The mathematical composition of AffineMap composes dims.
// 2. Compose AffineApplyOps and dispatch dims or symbols.
for (unsigned i = 0, e = operands.size(); i < e; ++i) {
auto t = operands[i];
- auto affineApply = dyn_cast_or_null<AffineApplyOp>(t->getDefiningOp());
+ auto affineApply = dyn_cast_or_null<AffineApplyOp>(t.getDefiningOp());
if (affineApply) {
// a. Compose affine.apply operations.
LLVM_DEBUG(affineApply.getOperation()->print(
void mlir::fullyComposeAffineMapAndOperands(AffineMap *map,
SmallVectorImpl<Value> *operands) {
while (llvm::any_of(*operands, [](Value v) {
- return isa_and_nonnull<AffineApplyOp>(v->getDefiningOp());
+ return isa_and_nonnull<AffineApplyOp>(v.getDefiningOp());
})) {
composeAffineMapAndOperands(map, operands);
}
static LogicalResult foldMemRefCast(Operation *op) {
bool folded = false;
for (OpOperand &operand : op->getOpOperands()) {
- auto cast = dyn_cast_or_null<MemRefCastOp>(operand.get()->getDefiningOp());
- if (cast && !cast.getOperand()->getType().isa<UnrankedMemRefType>()) {
+ auto cast = dyn_cast_or_null<MemRefCastOp>(operand.get().getDefiningOp());
+ if (cast && !cast.getOperand().getType().isa<UnrankedMemRefType>()) {
operand.set(cast.getOperand());
folded = true;
}
}
void AffineDmaStartOp::print(OpAsmPrinter &p) {
- p << "affine.dma_start " << *getSrcMemRef() << '[';
+ p << "affine.dma_start " << getSrcMemRef() << '[';
p.printAffineMapOfSSAIds(getSrcMapAttr(), getSrcIndices());
- p << "], " << *getDstMemRef() << '[';
+ p << "], " << getDstMemRef() << '[';
p.printAffineMapOfSSAIds(getDstMapAttr(), getDstIndices());
- p << "], " << *getTagMemRef() << '[';
+ p << "], " << getTagMemRef() << '[';
p.printAffineMapOfSSAIds(getTagMapAttr(), getTagIndices());
- p << "], " << *getNumElements();
+ p << "], " << getNumElements();
if (isStrided()) {
- p << ", " << *getStride();
- p << ", " << *getNumElementsPerStride();
+ p << ", " << getStride();
+ p << ", " << getNumElementsPerStride();
}
p << " : " << getSrcMemRefType() << ", " << getDstMemRefType() << ", "
<< getTagMemRefType();
}
LogicalResult AffineDmaStartOp::verify() {
- if (!getOperand(getSrcMemRefOperandIndex())->getType().isa<MemRefType>())
+ if (!getOperand(getSrcMemRefOperandIndex()).getType().isa<MemRefType>())
return emitOpError("expected DMA source to be of memref type");
- if (!getOperand(getDstMemRefOperandIndex())->getType().isa<MemRefType>())
+ if (!getOperand(getDstMemRefOperandIndex()).getType().isa<MemRefType>())
return emitOpError("expected DMA destination to be of memref type");
- if (!getOperand(getTagMemRefOperandIndex())->getType().isa<MemRefType>())
+ if (!getOperand(getTagMemRefOperandIndex()).getType().isa<MemRefType>())
return emitOpError("expected DMA tag to be of memref type");
// DMAs from different memory spaces supported.
}
for (auto idx : getSrcIndices()) {
- if (!idx->getType().isIndex())
+ if (!idx.getType().isIndex())
return emitOpError("src index to dma_start must have 'index' type");
if (!isValidAffineIndexOperand(idx))
return emitOpError("src index must be a dimension or symbol identifier");
}
for (auto idx : getDstIndices()) {
- if (!idx->getType().isIndex())
+ if (!idx.getType().isIndex())
return emitOpError("dst index to dma_start must have 'index' type");
if (!isValidAffineIndexOperand(idx))
return emitOpError("dst index must be a dimension or symbol identifier");
}
for (auto idx : getTagIndices()) {
- if (!idx->getType().isIndex())
+ if (!idx.getType().isIndex())
return emitOpError("tag index to dma_start must have 'index' type");
if (!isValidAffineIndexOperand(idx))
return emitOpError("tag index must be a dimension or symbol identifier");
}
void AffineDmaWaitOp::print(OpAsmPrinter &p) {
- p << "affine.dma_wait " << *getTagMemRef() << '[';
+ p << "affine.dma_wait " << getTagMemRef() << '[';
SmallVector<Value, 2> operands(getTagIndices());
p.printAffineMapOfSSAIds(getTagMapAttr(), operands);
p << "], ";
p.printOperand(getNumElements());
- p << " : " << getTagMemRef()->getType();
+ p << " : " << getTagMemRef().getType();
}
// Parse AffineDmaWaitOp.
}
LogicalResult AffineDmaWaitOp::verify() {
- if (!getOperand(0)->getType().isa<MemRefType>())
+ if (!getOperand(0).getType().isa<MemRefType>())
return emitOpError("expected DMA tag to be of memref type");
for (auto idx : getTagIndices()) {
- if (!idx->getType().isIndex())
+ if (!idx.getType().isIndex())
return emitOpError("index to dma_wait must have 'index' type");
if (!isValidAffineIndexOperand(idx))
return emitOpError("index must be a dimension or symbol identifier");
// Check that the body defines as single block argument for the induction
// variable.
auto *body = op.getBody();
- if (body->getNumArguments() != 1 ||
- !body->getArgument(0)->getType().isIndex())
+ if (body->getNumArguments() != 1 || !body->getArgument(0).getType().isIndex())
return op.emitOpError(
"expected body to have a single index argument for the "
"induction variable");
Region &AffineForOp::getLoopBody() { return region(); }
bool AffineForOp::isDefinedOutsideOfLoop(Value value) {
- return !region().isAncestor(value->getParentRegion());
+ return !region().isAncestor(value.getParentRegion());
}
LogicalResult AffineForOp::moveOutOfLoop(ArrayRef<Operation *> ops) {
/// not an induction variable, then return nullptr.
AffineForOp mlir::getForInductionVarOwner(Value val) {
auto ivArg = val.dyn_cast<BlockArgument>();
- if (!ivArg || !ivArg->getOwner())
+ if (!ivArg || !ivArg.getOwner())
return AffineForOp();
- auto *containingInst = ivArg->getOwner()->getParent()->getParentOp();
+ auto *containingInst = ivArg.getOwner()->getParent()->getParentOp();
return dyn_cast<AffineForOp>(containingInst);
}
result.addOperands(operands);
if (map)
result.addAttribute(getMapAttrName(), AffineMapAttr::get(map));
- auto memrefType = operands[0]->getType().cast<MemRefType>();
+ auto memrefType = operands[0].getType().cast<MemRefType>();
result.types.push_back(memrefType.getElementType());
}
assert(map.getNumInputs() == mapOperands.size() && "inconsistent index info");
result.addOperands(memref);
result.addOperands(mapOperands);
- auto memrefType = memref->getType().cast<MemRefType>();
+ auto memrefType = memref.getType().cast<MemRefType>();
result.addAttribute(getMapAttrName(), AffineMapAttr::get(map));
result.types.push_back(memrefType.getElementType());
}
void AffineLoadOp::build(Builder *builder, OperationState &result, Value memref,
ValueRange indices) {
- auto memrefType = memref->getType().cast<MemRefType>();
+ auto memrefType = memref.getType().cast<MemRefType>();
auto rank = memrefType.getRank();
// Create identity map for memrefs with at least one dimension or () -> ()
// for zero-dimensional memrefs.
}
void AffineLoadOp::print(OpAsmPrinter &p) {
- p << "affine.load " << *getMemRef() << '[';
+ p << "affine.load " << getMemRef() << '[';
if (AffineMapAttr mapAttr = getAttrOfType<AffineMapAttr>(getMapAttrName()))
p.printAffineMapOfSSAIds(mapAttr, getMapOperands());
p << ']';
}
for (auto idx : getMapOperands()) {
- if (!idx->getType().isIndex())
+ if (!idx.getType().isIndex())
return emitOpError("index to load must have 'index' type");
if (!isValidAffineIndexOperand(idx))
return emitOpError("index must be a dimension or symbol identifier");
void AffineStoreOp::build(Builder *builder, OperationState &result,
Value valueToStore, Value memref,
ValueRange indices) {
- auto memrefType = memref->getType().cast<MemRefType>();
+ auto memrefType = memref.getType().cast<MemRefType>();
auto rank = memrefType.getRank();
// Create identity map for memrefs with at least one dimension or () -> ()
// for zero-dimensional memrefs.
}
void AffineStoreOp::print(OpAsmPrinter &p) {
- p << "affine.store " << *getValueToStore();
- p << ", " << *getMemRef() << '[';
+ p << "affine.store " << getValueToStore();
+ p << ", " << getMemRef() << '[';
if (AffineMapAttr mapAttr = getAttrOfType<AffineMapAttr>(getMapAttrName()))
p.printAffineMapOfSSAIds(mapAttr, getMapOperands());
p << ']';
LogicalResult AffineStoreOp::verify() {
// First operand must have same type as memref element type.
- if (getValueToStore()->getType() != getMemRefType().getElementType())
+ if (getValueToStore().getType() != getMemRefType().getElementType())
return emitOpError("first operand must have same type memref element type");
auto mapAttr = getAttrOfType<AffineMapAttr>(getMapAttrName());
}
for (auto idx : getMapOperands()) {
- if (!idx->getType().isIndex())
+ if (!idx.getType().isIndex())
return emitOpError("index to store must have 'index' type");
if (!isValidAffineIndexOperand(idx))
return emitOpError("index must be a dimension or symbol identifier");
}
void print(OpAsmPrinter &p, AffinePrefetchOp op) {
- p << AffinePrefetchOp::getOperationName() << " " << *op.memref() << '[';
+ p << AffinePrefetchOp::getOperationName() << " " << op.memref() << '[';
AffineMapAttr mapAttr = op.getAttrOfType<AffineMapAttr>(op.getMapAttrName());
if (mapAttr) {
SmallVector<Value, 2> operands(op.getMapOperands());
return nullptr;
}
- Type storageType = elementType.castToStorageType(input->getType());
- Type realType = elementType.castToExpressedType(input->getType());
+ Type storageType = elementType.castToStorageType(input.getType());
+ Type realType = elementType.castToExpressedType(input.getType());
Type intermediateType =
castElementType(storageType, IntegerType::get(32, rewriter.getContext()));
assert(storageType && "cannot cast to storage type");
static Value emitDequantize(Location loc, Value input,
PatternRewriter &rewriter) {
- Type inputType = input->getType();
+ Type inputType = input.getType();
QuantizedType qElementType =
QuantizedType::getQuantizedElementType(inputType);
if (auto uperLayerElementType =
PatternMatchResult matchAndRewrite(DequantizeCastOp op,
PatternRewriter &rewriter) const override {
- Type inputType = op.arg()->getType();
- Type outputType = op.getResult()->getType();
+ Type inputType = op.arg().getType();
+ Type outputType = op.getResult().getType();
QuantizedType inputElementType =
QuantizedType::getQuantizedElementType(inputType);
UniformBinaryOpInfo(Operation *op, Value lhs, Value rhs,
Optional<APFloat> clampMin, Optional<APFloat> clampMax)
: op(op), lhs(lhs), rhs(rhs), clampMin(clampMin), clampMax(clampMax),
- lhsType(getUniformElementType(lhs->getType())),
- rhsType(getUniformElementType(rhs->getType())),
+ lhsType(getUniformElementType(lhs.getType())),
+ rhsType(getUniformElementType(rhs.getType())),
resultType(getUniformElementType(*op->result_type_begin())),
- lhsStorageType(quant::QuantizedType::castToStorageType(lhs->getType())),
- rhsStorageType(quant::QuantizedType::castToStorageType(rhs->getType())),
+ lhsStorageType(quant::QuantizedType::castToStorageType(lhs.getType())),
+ rhsStorageType(quant::QuantizedType::castToStorageType(rhs.getType())),
resultStorageType(
quant::QuantizedType::castToStorageType(*op->result_type_begin())) {
}
// to encode target module" has landed.
// auto functionType = kernelFunc.getType();
// for (unsigned i = 0; i < numKernelFuncArgs; ++i) {
- // if (getKernelOperand(i)->getType() != functionType.getInput(i)) {
+ // if (getKernelOperand(i).getType() != functionType.getInput(i)) {
// return emitOpError("type of function argument ")
// << i << " does not match";
// }
if (allReduce.body().front().getNumArguments() != 2)
return allReduce.emitError("expected two region arguments");
for (auto argument : allReduce.body().front().getArguments()) {
- if (argument->getType() != allReduce.getType())
+ if (argument.getType() != allReduce.getType())
return allReduce.emitError("incorrect region argument type");
}
unsigned yieldCount = 0;
if (auto yield = dyn_cast<gpu::YieldOp>(block.getTerminator())) {
if (yield.getNumOperands() != 1)
return allReduce.emitError("expected one gpu.yield operand");
- if (yield.getOperand(0)->getType() != allReduce.getType())
+ if (yield.getOperand(0).getType() != allReduce.getType())
return allReduce.emitError("incorrect gpu.yield type");
++yieldCount;
}
}
static LogicalResult verifyShuffleOp(gpu::ShuffleOp shuffleOp) {
- auto type = shuffleOp.value()->getType();
- if (shuffleOp.result()->getType() != type) {
+ auto type = shuffleOp.value().getType();
+ if (shuffleOp.result().getType() != type) {
return shuffleOp.emitOpError()
<< "requires the same type for value operand and result";
}
}
/// Prints a gpu.shuffle op: <name> <operands> <mode> : <value type>.
static void printShuffleOp(OpAsmPrinter &p, ShuffleOp op) {
  p << ShuffleOp::getOperationName() << ' ' << op.getOperands() << ' '
    << op.mode() << " : " << op.value().getType();
}
static ParseResult parseShuffleOp(OpAsmParser &parser, OperationState &state) {
// LaunchOp
//===----------------------------------------------------------------------===//
-static SmallVector<Type, 4> getValueTypes(ValueRange values) {
- SmallVector<Type, 4> types;
- types.reserve(values.size());
- for (Value v : values)
- types.push_back(v->getType());
- return types;
-}
-
void LaunchOp::build(Builder *builder, OperationState &result, Value gridSizeX,
Value gridSizeY, Value gridSizeZ, Value blockSizeX,
Value blockSizeY, Value blockSizeZ, ValueRange operands) {
Block *body = new Block();
body->addArguments(
std::vector<Type>(kNumConfigRegionAttributes, builder->getIndexType()));
- body->addArguments(getValueTypes(operands));
+ body->addArguments(llvm::to_vector<4>(operands.getTypes()));
kernelRegion->push_back(body);
}
// where %size-* and %iter-* will correspond to the body region arguments.
/// Prints one size assignment of a gpu.launch op in the form:
///   (%iv-x, %iv-y, %iv-z) in (%size-x = %op0, %size-y = %op1, %size-z = %op2)
static void printSizeAssignment(OpAsmPrinter &p, KernelDim3 size,
                                ValueRange operands, KernelDim3 ids) {
  p << '(' << ids.x << ", " << ids.y << ", " << ids.z << ") in (";
  p << size.x << " = " << operands[0] << ", ";
  p << size.y << " = " << operands[1] << ", ";
  p << size.z << " = " << operands[2] << ')';
}
void printLaunchOp(OpAsmPrinter &p, LaunchOp op) {
p << ' ' << op.getArgsKeyword() << '(';
Block *entryBlock = &op.body().front();
interleaveComma(llvm::seq<int>(0, operands.size()), p, [&](int i) {
- p << *entryBlock->getArgument(LaunchOp::kNumConfigRegionAttributes + i)
- << " = " << *operands[i];
+ p << entryBlock->getArgument(LaunchOp::kNumConfigRegionAttributes + i)
+ << " = " << operands[i];
});
p << ") ";
}
for (unsigned i = operands.size(); i > 0; --i) {
unsigned index = i - 1;
Value operand = operands[index];
- if (!isa_and_nonnull<ConstantOp>(operand->getDefiningOp()))
+ if (!isa_and_nonnull<ConstantOp>(operand.getDefiningOp()))
continue;
found = true;
Value internalConstant =
- rewriter.clone(*operand->getDefiningOp())->getResult(0);
+ rewriter.clone(*operand.getDefiningOp())->getResult(0);
Value kernelArg = *std::next(kernelArgs.begin(), index);
- kernelArg->replaceAllUsesWith(internalConstant);
+ kernelArg.replaceAllUsesWith(internalConstant);
launchOp.eraseKernelArgument(index);
}
p << ' ' << keyword << '(';
interleaveComma(values, p,
- [&p](BlockArgument v) { p << *v << " : " << v->getType(); });
+ [&p](BlockArgument v) { p << v << " : " << v.getType(); });
p << ')';
}
ArrayRef<BlockArgument> attributions,
unsigned memorySpace) {
for (Value v : attributions) {
- auto type = v->getType().dyn_cast<MemRefType>();
+ auto type = v.getType().dyn_cast<MemRefType>();
if (!type)
return op->emitOpError() << "expected memref type in attribution";
ArrayRef<Type> funcArgTypes = getType().getInputs();
for (unsigned i = 0; i < numFuncArguments; ++i) {
- Type blockArgType = front().getArgument(i)->getType();
+ Type blockArgType = front().getArgument(i).getType();
if (funcArgTypes[i] != blockArgType)
return emitOpError() << "expected body region argument #" << i
<< " to be of type " << funcArgTypes[i] << ", got "
// Replace the leading 12 function args with the respective thread/block index
// operations. Iterate backwards since args are erased and indices change.
for (int i = 11; i >= 0; --i) {
- firstBlock.getArgument(i)->replaceAllUsesWith(indexOps[i]);
+ firstBlock.getArgument(i).replaceAllUsesWith(indexOps[i]);
firstBlock.eraseArgument(i);
}
}
map.map(launch.getKernelOperand(i), kernelFunc.getArgument(i));
}
for (int i = launch.getNumKernelOperands() - 1; i >= 0; --i) {
- auto operandOp = launch.getKernelOperand(i)->getDefiningOp();
+ auto operandOp = launch.getKernelOperand(i).getDefiningOp();
if (!operandOp || !isInliningBeneficiary(operandOp)) {
newLaunchArgs.push_back(launch.getKernelOperand(i));
continue;
continue;
}
auto clone = kernelBuilder.clone(*operandOp, map);
- firstBlock.getArgument(i)->replaceAllUsesWith(clone->getResult(0));
+ firstBlock.getArgument(i).replaceAllUsesWith(clone->getResult(0));
firstBlock.eraseArgument(i);
}
if (newLaunchArgs.size() == launch.getNumKernelOperands())
SmallVector<Type, 8> newArgumentTypes;
newArgumentTypes.reserve(firstBlock.getNumArguments());
for (auto value : firstBlock.getArguments()) {
- newArgumentTypes.push_back(value->getType());
+ newArgumentTypes.push_back(value.getType());
}
kernelFunc.setType(LaunchBuilder.getFunctionType(newArgumentTypes, {}));
auto newLaunch = LaunchBuilder.create<gpu::LaunchFuncOp>(
//===----------------------------------------------------------------------===//
/// Prints llvm.icmp: <name> "<predicate>" <lhs>, <rhs> attr-dict : <lhs type>.
static void printICmpOp(OpAsmPrinter &p, ICmpOp &op) {
  p << op.getOperationName() << " \"" << stringifyICmpPredicate(op.predicate())
    << "\" " << op.getOperand(0) << ", " << op.getOperand(1);
  // The predicate is printed inline above, so elide it from the attr dict.
  p.printOptionalAttrDict(op.getAttrs(), {"predicate"});
  p << " : " << op.lhs().getType();
}
/// Prints llvm.fcmp: <name> "<predicate>" <lhs>, <rhs> attr-dict : <lhs type>.
static void printFCmpOp(OpAsmPrinter &p, FCmpOp &op) {
  p << op.getOperationName() << " \"" << stringifyFCmpPredicate(op.predicate())
    << "\" " << op.getOperand(0) << ", " << op.getOperand(1);
  // The predicate is printed inline above, so elide it from the attr dict.
  p.printOptionalAttrDict(op.getAttrs(), {"predicate"});
  p << " : " << op.lhs().getType();
}
// <operation> ::= `llvm.icmp` string-literal ssa-use `,` ssa-use
static void printAllocaOp(OpAsmPrinter &p, AllocaOp &op) {
auto elemTy = op.getType().cast<LLVM::LLVMType>().getPointerElementTy();
- auto funcTy = FunctionType::get({op.arraySize()->getType()}, {op.getType()},
+ auto funcTy = FunctionType::get({op.arraySize().getType()}, {op.getType()},
op.getContext());
- p << op.getOperationName() << ' ' << *op.arraySize() << " x " << elemTy;
+ p << op.getOperationName() << ' ' << op.arraySize() << " x " << elemTy;
if (op.alignment().hasValue() && op.alignment()->getSExtValue() != 0)
p.printOptionalAttrDict(op.getAttrs());
else
SmallVector<Type, 8> types(op.getOperandTypes());
auto funcTy = FunctionType::get(types, op.getType(), op.getContext());
- p << op.getOperationName() << ' ' << *op.base() << '['
+ p << op.getOperationName() << ' ' << op.base() << '['
<< op.getOperands().drop_front() << ']';
p.printOptionalAttrDict(op.getAttrs());
p << " : " << funcTy;
//===----------------------------------------------------------------------===//
/// Prints llvm.load: <name> <addr> attr-dict : <addr type>.
static void printLoadOp(OpAsmPrinter &p, LoadOp &op) {
  p << op.getOperationName() << ' ' << op.addr();
  p.printOptionalAttrDict(op.getAttrs());
  // Only the pointer type is printed; the result type is derived from it.
  p << " : " << op.addr().getType();
}
// Extract the pointee type from the LLVM pointer type wrapped in MLIR. Return
//===----------------------------------------------------------------------===//
/// Prints llvm.store: <name> <value>, <addr> attr-dict : <addr type>.
static void printStoreOp(OpAsmPrinter &p, StoreOp &op) {
  p << op.getOperationName() << ' ' << op.value() << ", " << op.addr();
  p.printOptionalAttrDict(op.getAttrs());
  // Only the pointer type is printed; the value type is derived from it.
  p << " : " << op.addr().getType();
}
// <operation> ::= `llvm.store` ssa-use `,` ssa-use attribute-dict? `:` type
if (isDirect)
p.printSymbolName(callee.getValue());
else
- p << *op.getOperand(0);
+ p << op.getOperand(0);
p << '(' << op.getOperands().drop_front(isDirect ? 0 : 1) << ')';
p.printOptionalAttrDict(op.getAttrs(), {"callee"});
/// Builds an llvm.extractelement op. The result type is the element type of
/// `vector`'s wrapped LLVM vector type.
void LLVM::ExtractElementOp::build(Builder *b, OperationState &result,
                                   Value vector, Value position,
                                   ArrayRef<NamedAttribute> attrs) {
  auto wrappedVectorType = vector.getType().cast<LLVM::LLVMType>();
  auto llvmType = wrappedVectorType.getVectorElementType();
  build(b, result, llvmType, vector, position);
  result.addAttributes(attrs);
}
/// Prints llvm.extractelement:
///   <name> <vector>[<position> : <position type>] attr-dict : <vector type>
static void printExtractElementOp(OpAsmPrinter &p, ExtractElementOp &op) {
  p << op.getOperationName() << ' ' << op.vector() << "[" << op.position()
    << " : " << op.position().getType() << "]";
  p.printOptionalAttrDict(op.getAttrs());
  p << " : " << op.vector().getType();
}
// <operation> ::= `llvm.extractelement` ssa-use `, ` ssa-use
//===----------------------------------------------------------------------===//
/// Prints llvm.extractvalue:
///   <name> <container><position attr> attr-dict : <container type>
static void printExtractValueOp(OpAsmPrinter &p, ExtractValueOp &op) {
  p << op.getOperationName() << ' ' << op.container() << op.position();
  // `position` is printed inline above, so elide it from the attr dict.
  p.printOptionalAttrDict(op.getAttrs(), {"position"});
  p << " : " << op.container().getType();
}
// Extract the type at `position` in the wrapped LLVM IR aggregate type
//===----------------------------------------------------------------------===//
/// Prints llvm.insertelement:
///   <name> <value>, <vector>[<position> : <position type>] attr-dict
///   : <vector type>
static void printInsertElementOp(OpAsmPrinter &p, InsertElementOp &op) {
  p << op.getOperationName() << ' ' << op.value() << ", " << op.vector() << "["
    << op.position() << " : " << op.position().getType() << "]";
  p.printOptionalAttrDict(op.getAttrs());
  p << " : " << op.vector().getType();
}
// <operation> ::= `llvm.insertelement` ssa-use `,` ssa-use `,` ssa-use
//===----------------------------------------------------------------------===//
/// Prints llvm.insertvalue:
///   <name> <value>, <container><position attr> attr-dict : <container type>
static void printInsertValueOp(OpAsmPrinter &p, InsertValueOp &op) {
  p << op.getOperationName() << ' ' << op.value() << ", " << op.container()
    << op.position();
  // `position` is printed inline above, so elide it from the attr dict.
  p.printOptionalAttrDict(op.getAttrs(), {"position"});
  p << " : " << op.container().getType();
}
// <operation> ::= `llvm.insertvaluevalue` ssa-use `,` ssa-use
//===----------------------------------------------------------------------===//
/// Prints llvm.select:
///   <name> <cond>, <true>, <false> attr-dict : <cond type>, <true type>
static void printSelectOp(OpAsmPrinter &p, SelectOp &op) {
  p << op.getOperationName() << ' ' << op.condition() << ", " << op.trueValue()
    << ", " << op.falseValue();
  p.printOptionalAttrDict(op.getAttrs());
  // The false-value type is not printed; it matches the true-value type.
  p << " : " << op.condition().getType() << ", " << op.trueValue().getType();
}
// <operation> ::= `llvm.select` ssa-use `,` ssa-use `,` ssa-use
//===----------------------------------------------------------------------===//
static void printCondBrOp(OpAsmPrinter &p, CondBrOp &op) {
- p << op.getOperationName() << ' ' << *op.getOperand(0) << ", ";
+ p << op.getOperationName() << ' ' << op.getOperand(0) << ", ";
p.printSuccessorAndUseList(op.getOperation(), 0);
p << ", ";
p.printSuccessorAndUseList(op.getOperation(), 1);
if (op.getNumOperands() == 0)
return;
- p << ' ' << *op.getOperand(0) << " : " << op.getOperand(0)->getType();
+ p << ' ' << op.getOperand(0) << " : " << op.getOperand(0).getType();
}
// <operation> ::= `llvm.return` ssa-use-list attribute-dict? `:`
/// Prints llvm.mlir.undef: <name> attr-dict : <result type>.
static void printUndefOp(OpAsmPrinter &p, UndefOp &op) {
  p << op.getOperationName();
  p.printOptionalAttrDict(op.getAttrs());
  p << " : " << op.res().getType();
}
// <operation> ::= `llvm.mlir.undef` attribute-dict? : type
/// Prints llvm.mlir.addressof: <name> @<global name> attr-dict
/// : <result type>.
static void printAddressOfOp(OpAsmPrinter &p, AddressOfOp op) {
  p << op.getOperationName() << " @" << op.global_name();
  // The referenced symbol is printed inline, so elide it from the attr dict.
  p.printOptionalAttrDict(op.getAttrs(), {"global_name"});
  p << " : " << op.getResult().getType();
}
static ParseResult parseAddressOfOp(OpAsmParser &parser,
"must reference a global defined by 'llvm.mlir.global'");
if (global.getType().getPointerTo(global.addr_space().getZExtValue()) !=
- op.getResult()->getType())
+ op.getResult().getType())
return op.emitOpError(
"the type must be a pointer to the type of the referred global");
/// Prints llvm.mlir.constant: <name>(<value attr>) attr-dict : <result type>.
static void printConstantOp(OpAsmPrinter &p, ConstantOp &op) {
  p << op.getOperationName() << '(' << op.value() << ')';
  // The value attribute is printed inline, so elide it from the attr dict.
  p.printOptionalAttrDict(op.getAttrs(), {"value"});
  p << " : " << op.res().getType();
}
// <operation> ::= `llvm.mlir.constant` `(` attribute `)` attribute-list? : type
/// Builds an llvm.shufflevector op. The result is a vector with the element
/// type of `v1` and as many elements as entries in `mask`.
void LLVM::ShuffleVectorOp::build(Builder *b, OperationState &result, Value v1,
                                  Value v2, ArrayAttr mask,
                                  ArrayRef<NamedAttribute> attrs) {
  auto wrappedContainerType1 = v1.getType().cast<LLVM::LLVMType>();
  auto vType = LLVMType::getVectorTy(
      wrappedContainerType1.getVectorElementType(), mask.size());
  build(b, result, vType, v1, v2, mask);
  // Forward extra attributes to the op state; previously `attrs` was silently
  // dropped, unlike the sibling ExtractElementOp::build which attaches them.
  result.addAttributes(attrs);
}
/// Prints llvm.shufflevector:
///   <name> <v1>, <v2> <mask attr> attr-dict : <v1 type>, <v2 type>
static void printShuffleVectorOp(OpAsmPrinter &p, ShuffleVectorOp &op) {
  p << op.getOperationName() << ' ' << op.v1() << ", " << op.v2() << " "
    << op.mask();
  // The mask is printed inline above, so elide it from the attr dict.
  p.printOptionalAttrDict(op.getAttrs(), {"mask"});
  p << " : " << op.v1().getType() << ", " << op.v2().getType();
}
// <operation> ::= `llvm.shufflevector` ssa-use `, ` ssa-use
unsigned numArguments = funcType->getNumParams();
Block &entryBlock = op.front();
for (unsigned i = 0; i < numArguments; ++i) {
- Type argType = entryBlock.getArgument(i)->getType();
+ Type argType = entryBlock.getArgument(i).getType();
auto argLLVMType = argType.dyn_cast<LLVMType>();
if (!argLLVMType)
return op.emitOpError("entry block argument #")
auto it = aliases.find(v);
if (it != aliases.end()) {
- assert(it->getSecond()->getType().isa<MemRefType>() && "Memref expected");
+ assert(it->getSecond().getType().isa<MemRefType>() && "Memref expected");
return it->getSecond();
}
while (true) {
if (v.isa<BlockArgument>())
return v;
- if (auto alloc = dyn_cast_or_null<AllocOp>(v->getDefiningOp())) {
+ if (auto alloc = dyn_cast_or_null<AllocOp>(v.getDefiningOp())) {
if (isStrided(alloc.getType()))
return alloc.getResult();
}
- if (auto slice = dyn_cast_or_null<SliceOp>(v->getDefiningOp())) {
+ if (auto slice = dyn_cast_or_null<SliceOp>(v.getDefiningOp())) {
auto it = aliases.insert(std::make_pair(v, find(slice.view())));
return it.first->second;
}
- if (auto view = dyn_cast_or_null<ViewOp>(v->getDefiningOp())) {
+ if (auto view = dyn_cast_or_null<ViewOp>(v.getDefiningOp())) {
auto it = aliases.insert(std::make_pair(v, view.source()));
return it.first->second;
}
- if (auto view = dyn_cast_or_null<SubViewOp>(v->getDefiningOp())) {
+ if (auto view = dyn_cast_or_null<SubViewOp>(v.getDefiningOp())) {
v = view.source();
continue;
}
- llvm::errs() << "View alias analysis reduces to: " << *v << "\n";
+ llvm::errs() << "View alias analysis reduces to: " << v << "\n";
llvm_unreachable("unsupported view alias case");
}
}
auto *op = dependence.dependentOpView.op;
LLVM_DEBUG(dbgs() << "\n***Found covering dependence of type "
<< toStringRef(dt) << ": " << *src << " -> " << *op
- << " on " << *dependence.indexingView);
+ << " on " << dependence.indexingView);
res.push_back(op);
}
}
for (auto it : llvm::enumerate(values))
blockTypes.push_back((it.index() < nViews)
? getElementTypeOrSelf(it.value())
- : it.value()->getType());
+ : it.value().getType());
assert(op->getRegions().front().empty());
op->getRegions().front().push_front(new Block);
for (unsigned i = 0; i < nViews; ++i) {
auto viewType = op.getShapedType(i);
- if (viewType.getElementType() != block.getArgument(i)->getType())
+ if (viewType.getElementType() != block.getArgument(i).getType())
return op.emitOpError("expected block argument ")
<< i << " of the same type as elemental type of "
<< ((i < nInputViews) ? "input " : "output ")
"number of loops");
for (unsigned i = 0; i < nLoops; ++i) {
- if (!block.getArgument(i)->getType().isIndex())
+ if (!block.getArgument(i).getType().isIndex())
return op.emitOpError("expected block argument ")
<< i << " to be of IndexType";
}
unsigned memrefArgIndex = i + nLoops;
auto viewType = op.getShapedType(i);
if (viewType.getElementType() !=
- block.getArgument(memrefArgIndex)->getType())
+ block.getArgument(memrefArgIndex).getType())
return op.emitOpError("expected block argument ")
<< memrefArgIndex << " of the same type as elemental type of "
<< ((i < nInputViews) ? "input " : "output ")
//===----------------------------------------------------------------------===//
/// Prints linalg.range: <name> <min>:<max>:<step> attr-dict : <result type>.
static void print(OpAsmPrinter &p, RangeOp op) {
  p << op.getOperationName() << " " << op.min() << ":" << op.max() << ":"
    << op.step();
  p.printOptionalAttrDict(op.getAttrs());
  p << " : " << op.getResult().getType();
}
static ParseResult parseRangeOp(OpAsmParser &parser, OperationState &result) {
result.addOperands(base);
result.addOperands(indexings);
- auto memRefType = base->getType().cast<MemRefType>();
+ auto memRefType = base.getType().cast<MemRefType>();
int64_t offset;
SmallVector<int64_t, 4> strides;
auto res = getStridesAndOffset(memRefType, strides, offset);
static void print(OpAsmPrinter &p, SliceOp op) {
auto indexings = op.indexings();
- p << SliceOp::getOperationName() << " " << *op.view() << "[" << indexings
+ p << SliceOp::getOperationName() << " " << op.view() << "[" << indexings
<< "] ";
p.printOptionalAttrDict(op.getAttrs());
p << " : " << op.getBaseViewType();
<< rank << " indexings, got " << llvm::size(op.indexings());
unsigned index = 0;
for (auto indexing : op.indexings()) {
- if (indexing->getType().isa<IndexType>())
+ if (indexing.getType().isa<IndexType>())
--rank;
++index;
}
auto permutationMap = permutation.getValue();
assert(permutationMap);
- auto memRefType = view->getType().cast<MemRefType>();
+ auto memRefType = view.getType().cast<MemRefType>();
auto rank = memRefType.getRank();
auto originalSizes = memRefType.getShape();
// Compute permuted sizes.
}
/// Prints linalg.transpose:
///   <name> <view> <permutation attr> attr-dict : <view type>
static void print(OpAsmPrinter &p, TransposeOp op) {
  p << op.getOperationName() << " " << op.view() << " " << op.permutation();
  // The permutation is printed inline above, so elide it from the attr dict.
  p.printOptionalAttrDict(op.getAttrs(),
                          {TransposeOp::getPermutationAttrName()});
  p << " : " << op.view().getType();
}
static ParseResult parseTransposeOp(OpAsmParser &parser,
for (unsigned i = 0; i != nOutputViews; ++i) {
auto elementType = genericOp.getOutputShapedType(i).getElementType();
- if (op.getOperand(i)->getType() != elementType)
+ if (op.getOperand(i).getType() != elementType)
return op.emitOpError("type of return operand ")
- << i << " (" << op.getOperand(i)->getType()
+ << i << " (" << op.getOperand(i).getType()
<< ") doesn't match view element type (" << elementType << ")";
}
return success();
static LogicalResult verify(FillOp op) {
auto viewType = op.getOutputShapedType(0);
- auto fillType = op.value()->getType();
+ auto fillType = op.value().getType();
if (viewType.getElementType() != fillType)
return op.emitOpError("expects fill type to match view elemental type");
return success();
}
static LogicalResult verify(ConvOp op) {
- auto oType = op.output()->getType().cast<MemRefType>();
- auto fType = op.filter()->getType().cast<MemRefType>();
- auto iType = op.input()->getType().cast<MemRefType>();
+ auto oType = op.output().getType().cast<MemRefType>();
+ auto fType = op.filter().getType().cast<MemRefType>();
+ auto iType = op.input().getType().cast<MemRefType>();
if (oType.getElementType() != iType.getElementType() ||
oType.getElementType() != fType.getElementType())
return op.emitOpError("expects memref elemental types to match");
if (loopDepth == en2.value().cast<AffineDimExpr>().getPosition()) {
LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange loopDepth: " << loopDepth
<< "\n");
- LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange view: " << *view
- << "\n");
+ LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange view: " << view << "\n");
return ViewDimension{view, static_cast<unsigned>(en2.index())};
}
}
unsigned consumerIdx, unsigned producerIdx,
OperationFolder *folder) {
auto subView = dyn_cast_or_null<SubViewOp>(
- consumer.getInput(consumerIdx)->getDefiningOp());
- auto slice = dyn_cast_or_null<SliceOp>(
- consumer.getInput(consumerIdx)->getDefiningOp());
+ consumer.getInput(consumerIdx).getDefiningOp());
+ auto slice =
+ dyn_cast_or_null<SliceOp>(consumer.getInput(consumerIdx).getDefiningOp());
assert(subView || slice);
(void)subView;
(void)slice;
auto producerIdx = producer.getIndexOfOutput(producedView).getValue();
// `consumerIdx` and `producerIdx` exist by construction.
LLVM_DEBUG(dbgs() << "\nRAW producer: " << *producer.getOperation()
- << " view: " << *producedView
+ << " view: " << producedView
<< " output index: " << producerIdx);
// Must be a subview or a slice to guarantee there are loops we can fuse
// into.
- auto subView = dyn_cast_or_null<SubViewOp>(consumedView->getDefiningOp());
- auto slice = dyn_cast_or_null<SliceOp>(consumedView->getDefiningOp());
+ auto subView = dyn_cast_or_null<SubViewOp>(consumedView.getDefiningOp());
+ auto slice = dyn_cast_or_null<SliceOp>(consumedView.getDefiningOp());
if (!subView && !slice) {
LLVM_DEBUG(dbgs() << "\nNot fusable (not a subview or slice)");
continue;
// TODO(ntv): non-identity layout.
auto isStaticMemRefWithIdentityLayout = [](Value v) {
- auto m = v->getType().dyn_cast<MemRefType>();
+ auto m = v.getType().dyn_cast<MemRefType>();
if (!m || !m.hasStaticShape() || !m.getAffineMaps().empty())
return false;
return true;
LinalgOp linOp = cast<LinalgOp>(op);
SetVector<Value> subViews;
for (auto it : linOp.getInputsAndOutputs())
- if (auto sv = dyn_cast_or_null<SubViewOp>(it->getDefiningOp()))
+ if (auto sv = dyn_cast_or_null<SubViewOp>(it.getDefiningOp()))
subViews.insert(sv);
if (!subViews.empty()) {
promoteSubViewOperands(rewriter, linOp, subViews);
llvm::cl::cat(clOptionsCategory), llvm::cl::init(false));
static Value allocBuffer(Type elementType, Value size, bool dynamicBuffers) {
- auto *ctx = size->getContext();
+ auto *ctx = size.getContext();
auto width = llvm::divideCeil(elementType.getIntOrFloatBitWidth(), 8);
if (!dynamicBuffers)
- if (auto cst = dyn_cast_or_null<ConstantIndexOp>(size->getDefiningOp()))
+ if (auto cst = dyn_cast_or_null<ConstantIndexOp>(size.getDefiningOp()))
return alloc(
MemRefType::get(width * cst.getValue(), IntegerType::get(8, ctx)));
Value mul = muli(constant_index(width), size);
res.reserve(subViews.size());
DenseMap<Value, PromotionInfo> promotionInfoMap;
for (auto v : subViews) {
- SubViewOp subView = cast<SubViewOp>(v->getDefiningOp());
+ SubViewOp subView = cast<SubViewOp>(v.getDefiningOp());
auto viewType = subView.getType();
// TODO(ntv): support more cases than just float.
if (!viewType.getElementType().isa<FloatType>())
}
for (auto v : subViews) {
- SubViewOp subView = cast<SubViewOp>(v->getDefiningOp());
+ SubViewOp subView = cast<SubViewOp>(v.getDefiningOp());
auto info = promotionInfoMap.find(v);
if (info == promotionInfoMap.end())
continue;
auto info = promotionInfoMap.find(v);
if (info == promotionInfoMap.end())
continue;
- copy(cast<SubViewOp>(v->getDefiningOp()), info->second.partialLocalView);
+ copy(cast<SubViewOp>(v.getDefiningOp()), info->second.partialLocalView);
}
return res;
}
SetVector<Value> subViews;
OpBuilder b(op);
for (auto it : op.getInputsAndOutputs())
- if (auto sv = dyn_cast_or_null<SubViewOp>(it->getDefiningOp()))
+ if (auto sv = dyn_cast_or_null<SubViewOp>(it.getDefiningOp()))
subViews.insert(sv);
if (!subViews.empty()) {
promoteSubViewOperands(b, op, subViews, dynamicBuffers, &folder);
llvm::cl::cat(clOptionsCategory));
/// Returns true iff `v` is produced by a ConstantIndexOp with value zero.
static bool isZero(Value v) {
  // Resolve the defining op once; dyn_cast_or_null covers both a null
  // defining op (block argument) and a producer of a different op kind,
  // avoiding the previous isa+cast double lookup.
  auto cst = dyn_cast_or_null<ConstantIndexOp>(v.getDefiningOp());
  return cst && cst.getValue() == 0;
}
using LoopIndexToRangeIndexMap = DenseMap<int, int>;
// variable and replace all uses of the previous value.
Value newIndex = b.create<AddIOp>(indexedGenericOp.getLoc(), oldIndex,
pivs[rangeIndex->second]->getValue());
- for (auto &use : oldIndex->getUses()) {
- if (use.getOwner() == newIndex->getDefiningOp())
+ for (auto &use : oldIndex.getUses()) {
+ if (use.getOwner() == newIndex.getDefiningOp())
continue;
use.set(newIndex);
}
for (unsigned viewIndex = 0; viewIndex < linalgOp.getNumInputsAndOutputs();
++viewIndex) {
Value view = *(viewIteratorBegin + viewIndex);
- unsigned rank = view->getType().cast<MemRefType>().getRank();
+ unsigned rank = view.getType().cast<MemRefType>().getRank();
auto map = loopToOperandRangesMaps(linalgOp)[viewIndex];
// If the view is not tiled, we can use it as is.
if (!isTiled(map, tileSizes)) {
// defined.
if (folder)
for (auto v : llvm::concat<Value>(lbs, subViewSizes))
- if (v->use_empty())
- v->getDefiningOp()->erase();
+ if (v.use_empty())
+ v.getDefiningOp()->erase();
return res;
}
mlir::edsc::LoopRangeBuilder::LoopRangeBuilder(ValueHandle *iv,
ValueHandle range) {
assert(range.getType() && "expected !linalg.range type");
- assert(range.getValue()->getDefiningOp() &&
+ assert(range.getValue().getDefiningOp() &&
"need operations to extract range parts");
- auto rangeOp = cast<RangeOp>(range.getValue()->getDefiningOp());
+ auto rangeOp = cast<RangeOp>(range.getValue().getDefiningOp());
auto lb = rangeOp.min();
auto ub = rangeOp.max();
auto step = rangeOp.step();
res.reserve(nOperands);
for (unsigned i = 0; i < nOperands; ++i) {
res.push_back(op->getOperand(numViews + i));
- auto t = res.back()->getType();
+ auto t = res.back().getType();
(void)t;
assert((t.isIntOrIndexOrFloat() || t.isa<VectorType>()) &&
"expected scalar or vector type");
}
LogicalResult verify(ForOp op) {
- if (auto cst = dyn_cast_or_null<ConstantIndexOp>(op.step()->getDefiningOp()))
+ if (auto cst = dyn_cast_or_null<ConstantIndexOp>(op.step().getDefiningOp()))
if (cst.getValue() <= 0)
return op.emitOpError("constant step operand must be positive");
// Check that the body defines as single block argument for the induction
// variable.
auto *body = op.getBody();
- if (body->getNumArguments() != 1 ||
- !body->getArgument(0)->getType().isIndex())
+ if (body->getNumArguments() != 1 || !body->getArgument(0).getType().isIndex())
return op.emitOpError("expected body to have a single index argument for "
"the induction variable");
return success();
}
static void print(OpAsmPrinter &p, ForOp op) {
- p << op.getOperationName() << " " << *op.getInductionVar() << " = "
- << *op.lowerBound() << " to " << *op.upperBound() << " step " << *op.step();
+ p << op.getOperationName() << " " << op.getInductionVar() << " = "
+ << op.lowerBound() << " to " << op.upperBound() << " step " << op.step();
p.printRegion(op.region(),
/*printEntryBlockArgs=*/false,
/*printBlockTerminators=*/false);
Region &ForOp::getLoopBody() { return region(); }
bool ForOp::isDefinedOutsideOfLoop(Value value) {
- return !region().isAncestor(value->getParentRegion());
+ return !region().isAncestor(value.getParentRegion());
}
LogicalResult ForOp::moveOutOfLoop(ArrayRef<Operation *> ops) {
- for (auto *op : ops)
+ for (auto op : ops)
op->moveBefore(this->getOperation());
return success();
}
auto ivArg = val.dyn_cast<BlockArgument>();
if (!ivArg)
return ForOp();
- assert(ivArg->getOwner() && "unlinked block argument");
- auto *containingInst = ivArg->getOwner()->getParentOp();
+ assert(ivArg.getOwner() && "unlinked block argument");
+ auto *containingInst = ivArg.getOwner()->getParentOp();
return dyn_cast_or_null<ForOp>(containingInst);
}
}
static void print(OpAsmPrinter &p, IfOp op) {
- p << IfOp::getOperationName() << " " << *op.condition();
+ p << IfOp::getOperationName() << " " << op.condition();
p.printRegion(op.thenRegion(),
/*printEntryBlockArgs=*/false,
/*printBlockTerminators=*/false);
OpFoldResult StorageCastOp::fold(ArrayRef<Attribute> operands) {
/// Matches x -> [scast -> scast] -> y, replacing the second scast with the
/// value of x if the casts invert each other.
- auto srcScastOp = dyn_cast_or_null<StorageCastOp>(arg()->getDefiningOp());
- if (!srcScastOp || srcScastOp.arg()->getType() != getType())
+ auto srcScastOp = dyn_cast_or_null<StorageCastOp>(arg().getDefiningOp());
+ if (!srcScastOp || srcScastOp.arg().getType() != getType())
return OpFoldResult();
return srcScastOp.arg();
}
// Does the qbarrier convert to a quantized type. This will not be true
// if a quantized type has not yet been chosen or if the cast to an equivalent
// storage type is not supported.
- Type qbarrierResultType = qbarrier.getResult()->getType();
+ Type qbarrierResultType = qbarrier.getResult().getType();
QuantizedType quantizedElementType =
QuantizedType::getQuantizedElementType(qbarrierResultType);
if (!quantizedElementType) {
// type? This will not be true if the qbarrier is superfluous (converts
// from and to a quantized type).
if (!quantizedElementType.isCompatibleExpressedType(
- qbarrier.arg()->getType())) {
+ qbarrier.arg().getType())) {
return matchFailure();
}
// When creating the new const op, use a fused location that combines the
// original const and the qbarrier that led to the quantization.
auto fusedLoc = FusedLoc::get(
- {qbarrier.arg()->getDefiningOp()->getLoc(), qbarrier.getLoc()},
+ {qbarrier.arg().getDefiningOp()->getLoc(), qbarrier.getLoc()},
rewriter.getContext());
auto newConstOp =
rewriter.create<ConstantOp>(fusedLoc, newConstValueType, newConstValue);
// Replace the values directly with the return operands.
assert(valuesToRepl.size() == 1 &&
"spv.ReturnValue expected to only handle one result");
- valuesToRepl.front()->replaceAllUsesWith(retValOp.value());
+ valuesToRepl.front().replaceAllUsesWith(retValOp.value());
}
};
} // namespace
static LogicalResult verifyCastOp(Operation *op,
bool requireSameBitWidth = true) {
- Type operandType = op->getOperand(0)->getType();
- Type resultType = op->getResult(0)->getType();
+ Type operandType = op->getOperand(0).getType();
+ Type resultType = op->getResult(0).getType();
// ODS checks that result type and operand type have the same shape.
if (auto vectorType = operandType.dyn_cast<VectorType>()) {
//
// TODO(ravishankarm): Check that the value type satisfies restrictions of
// SPIR-V OpLoad/OpStore operations
- if (val->getType() !=
- ptr->getType().cast<spirv::PointerType>().getPointeeType()) {
+ if (val.getType() !=
+ ptr.getType().cast<spirv::PointerType>().getPointeeType()) {
return op.emitOpError("mismatch in result type and pointer type");
}
return success();
}
static LogicalResult verifyBitFieldExtractOp(Operation *op) {
- if (op->getOperand(0)->getType() != op->getResult(0)->getType()) {
+ if (op->getOperand(0).getType() != op->getResult(0).getType()) {
return op->emitError("expected the same type for the first operand and "
"result, but provided ")
- << op->getOperand(0)->getType() << " and "
- << op->getResult(0)->getType();
+ << op->getOperand(0).getType() << " and "
+ << op->getResult(0).getType();
}
return success();
}
printer << spirv::stringifyMemorySemantics(
static_cast<spirv::MemorySemantics>(
memorySemanticsAttr.getInt()))
- << "\" " << op->getOperands() << " : "
- << op->getOperand(0)->getType();
+ << "\" " << op->getOperands() << " : " << op->getOperand(0).getType();
}
// Verifies an atomic update op.
static LogicalResult verifyAtomicUpdateOp(Operation *op) {
- auto ptrType = op->getOperand(0)->getType().cast<spirv::PointerType>();
+ auto ptrType = op->getOperand(0).getType().cast<spirv::PointerType>();
auto elementType = ptrType.getPointeeType();
if (!elementType.isa<IntegerType>())
return op->emitOpError(
<< elementType;
if (op->getNumOperands() > 1) {
- auto valueType = op->getOperand(1)->getType();
+ auto valueType = op->getOperand(1).getType();
if (valueType != elementType)
return op->emitOpError("expected value to have the same type as the "
"pointer operand's pointee type ")
}
static void printUnaryOp(Operation *unaryOp, OpAsmPrinter &printer) {
- printer << unaryOp->getName() << ' ' << *unaryOp->getOperand(0) << " : "
- << unaryOp->getOperand(0)->getType();
+ printer << unaryOp->getName() << ' ' << unaryOp->getOperand(0) << " : "
+ << unaryOp->getOperand(0).getType();
}
/// Result of a logical op must be a scalar or vector of boolean type.
static void printLogicalOp(Operation *logicalOp, OpAsmPrinter &printer) {
printer << logicalOp->getName() << ' ' << logicalOp->getOperands() << " : "
- << logicalOp->getOperand(0)->getType();
+ << logicalOp->getOperand(0).getType();
}
static ParseResult parseShiftOp(OpAsmParser &parser, OperationState &state) {
static void printShiftOp(Operation *op, OpAsmPrinter &printer) {
Value base = op->getOperand(0);
Value shift = op->getOperand(1);
- printer << op->getName() << ' ' << *base << ", " << *shift << " : "
- << base->getType() << ", " << shift->getType();
+ printer << op->getName() << ' ' << base << ", " << shift << " : "
+ << base.getType() << ", " << shift.getType();
}
static LogicalResult verifyShiftOp(Operation *op) {
- if (op->getOperand(0)->getType() != op->getResult(0)->getType()) {
+ if (op->getOperand(0).getType() != op->getResult(0).getType()) {
return op->emitError("expected the same type for the first operand and "
"result, but provided ")
- << op->getOperand(0)->getType() << " and "
- << op->getResult(0)->getType();
+ << op->getOperand(0).getType() << " and "
+ << op->getResult(0).getType();
}
return success();
}
}
index = 0;
if (resultType.isa<spirv::StructType>()) {
- Operation *op = indexSSA->getDefiningOp();
+ Operation *op = indexSSA.getDefiningOp();
if (!op) {
emitError(baseLoc, "'spv.AccessChain' op index must be an "
"integer spv.constant to access "
void spirv::AccessChainOp::build(Builder *builder, OperationState &state,
Value basePtr, ValueRange indices) {
- auto type = getElementPtrType(basePtr->getType(), indices, state.location);
+ auto type = getElementPtrType(basePtr.getType(), indices, state.location);
assert(type && "Unable to deduce return type based on basePtr and indices");
build(builder, state, type, basePtr, indices);
}
}
static void print(spirv::AccessChainOp op, OpAsmPrinter &printer) {
- printer << spirv::AccessChainOp::getOperationName() << ' ' << *op.base_ptr()
- << '[' << op.indices() << "] : " << op.base_ptr()->getType();
+ printer << spirv::AccessChainOp::getOperationName() << ' ' << op.base_ptr()
+ << '[' << op.indices() << "] : " << op.base_ptr().getType();
}
static LogicalResult verify(spirv::AccessChainOp accessChainOp) {
SmallVector<Value, 4> indices(accessChainOp.indices().begin(),
accessChainOp.indices().end());
- auto resultType = getElementPtrType(accessChainOp.base_ptr()->getType(),
+ auto resultType = getElementPtrType(accessChainOp.base_ptr().getType(),
indices, accessChainOp.getLoc());
if (!resultType) {
return failure();
PatternMatchResult matchAndRewrite(spirv::AccessChainOp accessChainOp,
PatternRewriter &rewriter) const override {
auto parentAccessChainOp = dyn_cast_or_null<spirv::AccessChainOp>(
- accessChainOp.base_ptr()->getDefiningOp());
+ accessChainOp.base_ptr().getDefiningOp());
if (!parentAccessChainOp) {
return matchFailure();
printer.printSymbolName(addressOfOp.variable());
// Print the type.
- printer << " : " << addressOfOp.pointer()->getType();
+ printer << " : " << addressOfOp.pointer().getType();
}
static LogicalResult verify(spirv::AddressOfOp addressOfOp) {
if (!varOp) {
return addressOfOp.emitOpError("expected spv.globalVariable symbol");
}
- if (addressOfOp.pointer()->getType() != varOp.type()) {
+ if (addressOfOp.pointer().getType() != varOp.type()) {
return addressOfOp.emitOpError(
"result type mismatch with the referenced global variable's type");
}
<< stringifyScope(atomOp.memory_scope()) << "\" \""
<< stringifyMemorySemantics(atomOp.equal_semantics()) << "\" \""
<< stringifyMemorySemantics(atomOp.unequal_semantics()) << "\" "
- << atomOp.getOperands() << " : " << atomOp.pointer()->getType();
+ << atomOp.getOperands() << " : " << atomOp.pointer().getType();
}
static LogicalResult verify(spirv::AtomicCompareExchangeWeakOp atomOp) {
// "The type of Value must be the same as Result Type. The type of the value
// pointed to by Pointer must be the same as Result Type. This type must also
// match the type of Comparator."
- if (atomOp.getType() != atomOp.value()->getType())
+ if (atomOp.getType() != atomOp.value().getType())
return atomOp.emitOpError("value operand must have the same type as the op "
"result, but found ")
- << atomOp.value()->getType() << " vs " << atomOp.getType();
+ << atomOp.value().getType() << " vs " << atomOp.getType();
- if (atomOp.getType() != atomOp.comparator()->getType())
+ if (atomOp.getType() != atomOp.comparator().getType())
return atomOp.emitOpError(
"comparator operand must have the same type as the op "
"result, but found ")
- << atomOp.comparator()->getType() << " vs " << atomOp.getType();
+ << atomOp.comparator().getType() << " vs " << atomOp.getType();
Type pointeeType =
- atomOp.pointer()->getType().cast<spirv::PointerType>().getPointeeType();
+ atomOp.pointer().getType().cast<spirv::PointerType>().getPointeeType();
if (atomOp.getType() != pointeeType)
return atomOp.emitOpError(
"pointer operand's pointee type must have the same "
static LogicalResult verify(spirv::BitcastOp bitcastOp) {
// TODO: The SPIR-V spec validation rules are different for different
// versions.
- auto operandType = bitcastOp.operand()->getType();
- auto resultType = bitcastOp.result()->getType();
+ auto operandType = bitcastOp.operand().getType();
+ auto resultType = bitcastOp.result().getType();
if (operandType == resultType) {
return bitcastOp.emitError(
"result type must be different from operand type");
OpAsmPrinter &printer) {
printer << spirv::BitFieldInsertOp::getOperationName() << ' '
<< bitFieldInsertOp.getOperands() << " : "
- << bitFieldInsertOp.base()->getType() << ", "
- << bitFieldInsertOp.offset()->getType() << ", "
- << bitFieldInsertOp.count()->getType();
+ << bitFieldInsertOp.base().getType() << ", "
+ << bitFieldInsertOp.offset().getType() << ", "
+ << bitFieldInsertOp.count().getType();
}
static LogicalResult verify(spirv::BitFieldInsertOp bitFieldOp) {
- auto baseType = bitFieldOp.base()->getType();
- auto insertType = bitFieldOp.insert()->getType();
- auto resultType = bitFieldOp.getResult()->getType();
+ auto baseType = bitFieldOp.base().getType();
+ auto insertType = bitFieldOp.insert().getType();
+ auto resultType = bitFieldOp.getResult().getType();
if ((baseType != insertType) || (baseType != resultType)) {
return bitFieldOp.emitError("expected the same type for the base operand, "
OpAsmPrinter &printer) {
printer << spirv::CompositeConstructOp::getOperationName() << " "
<< compositeConstructOp.constituents() << " : "
- << compositeConstructOp.getResult()->getType();
+ << compositeConstructOp.getResult().getType();
}
static LogicalResult verify(spirv::CompositeConstructOp compositeConstructOp) {
}
for (auto index : llvm::seq<uint32_t>(0, constituents.size())) {
- if (constituents[index]->getType() != cType.getElementType(index)) {
+ if (constituents[index].getType() != cType.getElementType(index)) {
return compositeConstructOp.emitError(
"operand type mismatch: expected operand type ")
<< cType.getElementType(index) << ", but provided "
- << constituents[index]->getType();
+ << constituents[index].getType();
}
}
ArrayRef<int32_t> indices) {
auto indexAttr = builder->getI32ArrayAttr(indices);
auto elementType =
- getElementType(composite->getType(), indexAttr, state.location);
+ getElementType(composite.getType(), indexAttr, state.location);
if (!elementType) {
return;
}
static void print(spirv::CompositeExtractOp compositeExtractOp,
OpAsmPrinter &printer) {
printer << spirv::CompositeExtractOp::getOperationName() << ' '
- << *compositeExtractOp.composite() << compositeExtractOp.indices()
- << " : " << compositeExtractOp.composite()->getType();
+ << compositeExtractOp.composite() << compositeExtractOp.indices()
+ << " : " << compositeExtractOp.composite().getType();
}
static LogicalResult verify(spirv::CompositeExtractOp compExOp) {
auto indicesArrayAttr = compExOp.indices().dyn_cast<ArrayAttr>();
- auto resultType = getElementType(compExOp.composite()->getType(),
+ auto resultType = getElementType(compExOp.composite().getType(),
indicesArrayAttr, compExOp.getLoc());
if (!resultType)
return failure();
static LogicalResult verify(spirv::CompositeInsertOp compositeInsertOp) {
auto indicesArrayAttr = compositeInsertOp.indices().dyn_cast<ArrayAttr>();
auto objectType =
- getElementType(compositeInsertOp.composite()->getType(), indicesArrayAttr,
+ getElementType(compositeInsertOp.composite().getType(), indicesArrayAttr,
compositeInsertOp.getLoc());
if (!objectType)
return failure();
- if (objectType != compositeInsertOp.object()->getType()) {
+ if (objectType != compositeInsertOp.object().getType()) {
return compositeInsertOp.emitOpError("object operand type should be ")
<< objectType << ", but found "
- << compositeInsertOp.object()->getType();
+ << compositeInsertOp.object().getType();
}
- if (compositeInsertOp.composite()->getType() != compositeInsertOp.getType()) {
+ if (compositeInsertOp.composite().getType() != compositeInsertOp.getType()) {
return compositeInsertOp.emitOpError("result type should be the same as "
"the composite type, but found ")
- << compositeInsertOp.composite()->getType() << " vs "
+ << compositeInsertOp.composite().getType() << " vs "
<< compositeInsertOp.getType();
}
static void print(spirv::CompositeInsertOp compositeInsertOp,
OpAsmPrinter &printer) {
printer << spirv::CompositeInsertOp::getOperationName() << " "
- << *compositeInsertOp.object() << ", "
- << *compositeInsertOp.composite() << compositeInsertOp.indices()
- << " : " << compositeInsertOp.object()->getType() << " into "
- << compositeInsertOp.composite()->getType();
+ << compositeInsertOp.object() << ", " << compositeInsertOp.composite()
+ << compositeInsertOp.indices() << " : "
+ << compositeInsertOp.object().getType() << " into "
+ << compositeInsertOp.composite().getType();
}
//===----------------------------------------------------------------------===//
}
for (uint32_t i = 0, e = functionType.getNumInputs(); i != e; ++i) {
- if (functionCallOp.getOperand(i)->getType() != functionType.getInput(i)) {
+ if (functionCallOp.getOperand(i).getType() != functionType.getInput(i)) {
return functionCallOp.emitOpError(
"operand type mismatch: expected operand type ")
<< functionType.getInput(i) << ", but provided "
- << functionCallOp.getOperand(i)->getType()
- << " for operand number " << i;
+ << functionCallOp.getOperand(i).getType() << " for operand number "
+ << i;
}
}
}
if (functionCallOp.getNumResults() &&
- (functionCallOp.getResult(0)->getType() != functionType.getResult(0))) {
+ (functionCallOp.getResult(0).getType() != functionType.getResult(0))) {
return functionCallOp.emitOpError("result type mismatch: expected ")
<< functionType.getResult(0) << ", but provided "
- << functionCallOp.getResult(0)->getType();
+ << functionCallOp.getResult(0).getType();
}
return success();
void spirv::LoadOp::build(Builder *builder, OperationState &state,
Value basePtr, IntegerAttr memory_access,
IntegerAttr alignment) {
- auto ptrType = basePtr->getType().cast<spirv::PointerType>();
+ auto ptrType = basePtr.getType().cast<spirv::PointerType>();
build(builder, state, ptrType.getPointeeType(), basePtr, memory_access,
alignment);
}
auto *op = loadOp.getOperation();
SmallVector<StringRef, 4> elidedAttrs;
StringRef sc = stringifyStorageClass(
- loadOp.ptr()->getType().cast<spirv::PointerType>().getStorageClass());
+ loadOp.ptr().getType().cast<spirv::PointerType>().getStorageClass());
printer << spirv::LoadOp::getOperationName() << " \"" << sc << "\" "
<< loadOp.ptr();
static void print(spirv::ReferenceOfOp referenceOfOp, OpAsmPrinter &printer) {
printer << spirv::ReferenceOfOp::getOperationName() << ' ';
printer.printSymbolName(referenceOfOp.spec_const());
- printer << " : " << referenceOfOp.reference()->getType();
+ printer << " : " << referenceOfOp.reference().getType();
}
static LogicalResult verify(spirv::ReferenceOfOp referenceOfOp) {
if (!specConstOp) {
return referenceOfOp.emitOpError("expected spv.specConstant symbol");
}
- if (referenceOfOp.reference()->getType() !=
+ if (referenceOfOp.reference().getType() !=
specConstOp.default_value().getType()) {
return referenceOfOp.emitOpError("result type mismatch with the referenced "
"specialization constant's type");
static void print(spirv::ReturnValueOp retValOp, OpAsmPrinter &printer) {
printer << spirv::ReturnValueOp::getOperationName() << ' ' << retValOp.value()
- << " : " << retValOp.value()->getType();
+ << " : " << retValOp.value().getType();
}
static LogicalResult verify(spirv::ReturnValueOp retValOp) {
"returns 1 value but enclosing function requires ")
<< numFnResults << " results";
- auto operandType = retValOp.value()->getType();
+ auto operandType = retValOp.value().getType();
auto fnResultType = funcOp.getType().getResult(0);
if (operandType != fnResultType)
return retValOp.emitOpError(" return value's type (")
void spirv::SelectOp::build(Builder *builder, OperationState &state, Value cond,
Value trueValue, Value falseValue) {
- build(builder, state, trueValue->getType(), cond, trueValue, falseValue);
+ build(builder, state, trueValue.getType(), cond, trueValue, falseValue);
}
static ParseResult parseSelectOp(OpAsmParser &parser, OperationState &state) {
static void print(spirv::SelectOp op, OpAsmPrinter &printer) {
printer << spirv::SelectOp::getOperationName() << " " << op.getOperands()
- << " : " << op.condition()->getType() << ", "
- << op.result()->getType();
+ << " : " << op.condition().getType() << ", " << op.result().getType();
}
static LogicalResult verify(spirv::SelectOp op) {
- auto resultTy = op.result()->getType();
- if (op.true_value()->getType() != resultTy) {
+ auto resultTy = op.result().getType();
+ if (op.true_value().getType() != resultTy) {
return op.emitOpError("result type and true value type must be the same");
}
- if (op.false_value()->getType() != resultTy) {
+ if (op.false_value().getType() != resultTy) {
return op.emitOpError("result type and false value type must be the same");
}
- if (auto conditionTy = op.condition()->getType().dyn_cast<VectorType>()) {
+ if (auto conditionTy = op.condition().getType().dyn_cast<VectorType>()) {
auto resultVectorTy = resultTy.dyn_cast<VectorType>();
if (!resultVectorTy) {
return op.emitOpError("result expected to be of vector type when "
cast<spirv::StoreOp>(trueBlock->front()).getOperation()->getAttrs();
auto selectOp = rewriter.create<spirv::SelectOp>(
- selectionOp.getLoc(), trueValue->getType(), brConditionalOp.condition(),
+ selectionOp.getLoc(), trueValue.getType(), brConditionalOp.condition(),
trueValue, falseValue);
rewriter.create<spirv::StoreOp>(selectOp.getLoc(), ptrValue,
selectOp.getResult(), storeOpAttributes);
// attributes and a valid type of the value.
if ((trueBrStoreOp.ptr() != falseBrStoreOp.ptr()) ||
!isSameAttrList(trueBrStoreOp, falseBrStoreOp) ||
- !isValidType(trueBrStoreOp.value()->getType())) {
+ !isValidType(trueBrStoreOp.value().getType())) {
return matchFailure();
}
auto *op = storeOp.getOperation();
SmallVector<StringRef, 4> elidedAttrs;
StringRef sc = stringifyStorageClass(
- storeOp.ptr()->getType().cast<spirv::PointerType>().getStorageClass());
+ storeOp.ptr().getType().cast<spirv::PointerType>().getStorageClass());
printer << spirv::StoreOp::getOperationName() << " \"" << sc << "\" "
<< storeOp.ptr() << ", " << storeOp.value();
printMemoryAccessAttribute(storeOp, printer, elidedAttrs);
- printer << " : " << storeOp.value()->getType();
+ printer << " : " << storeOp.value().getType();
printer.printOptionalAttrDict(op->getAttrs(), elidedAttrs);
}
"spv.globalVariable for module-level variables.");
}
- auto pointerType = varOp.pointer()->getType().cast<spirv::PointerType>();
+ auto pointerType = varOp.pointer().getType().cast<spirv::PointerType>();
if (varOp.storage_class() != pointerType.getStorageClass())
return varOp.emitOpError(
"storage class must match result pointer's storage class");
if (varOp.getNumOperands() != 0) {
// SPIR-V spec: "Initializer must be an <id> from a constant instruction or
// a global (module scope) OpVariable instruction".
- auto *initOp = varOp.getOperand(0)->getDefiningOp();
+ auto *initOp = varOp.getOperand(0).getDefiningOp();
if (!initOp || !(isa<spirv::ConstantOp>(initOp) || // for normal constant
isa<spirv::ReferenceOfOp>(initOp) || // for spec constant
isa<spirv::AddressOfOp>(initOp)))
<< " from block " << block << "\n");
if (!isFnEntryBlock(block)) {
for (BlockArgument blockArg : block->getArguments()) {
- auto newArg = newBlock->addArgument(blockArg->getType());
+ auto newArg = newBlock->addArgument(blockArg.getType());
mapper.map(blockArg, newArg);
LLVM_DEBUG(llvm::dbgs() << "[cf] remapped block argument " << blockArg
<< " to " << newArg << '\n');
// make sure the old merge block has the same block argument list.
assert(mergeBlock->args_empty() && "OpPhi in loop merge block unsupported");
for (BlockArgument blockArg : headerBlock->getArguments()) {
- mergeBlock->addArgument(blockArg->getType());
+ mergeBlock->addArgument(blockArg.getType());
}
// If the loop header block has block arguments, make sure the spv.branch op
"spirv::BitcastOp, only ")
<< wordIndex << " of " << words.size() << " processed";
}
- if (resultTypes[0] == operands[0]->getType() &&
+ if (resultTypes[0] == operands[0].getType() &&
resultTypes[0].isa<IntegerType>()) {
// TODO(b/130356985): This check is added to ignore error in Op verification
// due to both signed and unsigned integers mapping to the same
Value val = valueIDPair.first;
os << " " << val << " "
<< "id = " << valueIDPair.second << ' ';
- if (auto *op = val->getDefiningOp()) {
+ if (auto *op = val.getDefiningOp()) {
os << "from op '" << op->getName() << "'";
} else if (auto arg = val.dyn_cast<BlockArgument>()) {
- Block *block = arg->getOwner();
+ Block *block = arg.getOwner();
os << "from argument of block " << block << ' ';
os << " in op '" << block->getParentOp()->getName() << "'";
}
// Declare the parameters.
for (auto arg : op.getArguments()) {
uint32_t argTypeID = 0;
- if (failed(processType(op.getLoc(), arg->getType(), argTypeID))) {
+ if (failed(processType(op.getLoc(), arg.getType(), argTypeID))) {
return failure();
}
auto argValueID = getNextID();
// Get the type <id> and result <id> for this OpPhi instruction.
uint32_t phiTypeID = 0;
- if (failed(processType(arg->getLoc(), arg->getType(), phiTypeID)))
+ if (failed(processType(arg.getLoc(), arg.getType(), phiTypeID)))
return failure();
uint32_t phiID = getNextID();
// Change the type for the direct users.
target.addDynamicallyLegalOp<spirv::AddressOfOp>([](spirv::AddressOfOp op) {
- return VulkanLayoutUtils::isLegalType(op.pointer()->getType());
+ return VulkanLayoutUtils::isLegalType(op.pointer().getType());
});
// TODO: Change the type for the indirect users such as spv.Load, spv.Store,
// Replace the values directly with the return operands.
assert(returnOp.getNumOperands() == valuesToRepl.size());
for (const auto &it : llvm::enumerate(returnOp.getOperands()))
- valuesToRepl[it.index()]->replaceAllUsesWith(it.value());
+ valuesToRepl[it.index()].replaceAllUsesWith(it.value());
}
};
} // end anonymous namespace
int stdDotLen = StandardOpsDialect::getDialectNamespace().size() + 1;
p << op->getName().getStringRef().drop_front(stdDotLen) << ' '
- << *op->getOperand(0);
+ << op->getOperand(0);
p.printOptionalAttrDict(op->getAttrs());
- p << " : " << op->getOperand(0)->getType();
+ p << " : " << op->getOperand(0).getType();
}
/// A custom binary operation printer that omits the "std." prefix from the
// If not all the operand and result types are the same, just use the
// generic assembly form to avoid omitting information in printing.
- auto resultType = op->getResult(0)->getType();
- if (op->getOperand(0)->getType() != resultType ||
- op->getOperand(1)->getType() != resultType) {
+ auto resultType = op->getResult(0).getType();
+ if (op->getOperand(0).getType() != resultType ||
+ op->getOperand(1).getType() != resultType) {
p.printGenericOp(op);
return;
}
int stdDotLen = StandardOpsDialect::getDialectNamespace().size() + 1;
p << op->getName().getStringRef().drop_front(stdDotLen) << ' '
- << *op->getOperand(0) << ", " << *op->getOperand(1);
+ << op->getOperand(0) << ", " << op->getOperand(1);
p.printOptionalAttrDict(op->getAttrs());
// Now we can output only one type for all operands and the result.
- p << " : " << op->getResult(0)->getType();
+ p << " : " << op->getResult(0).getType();
}
/// A custom cast operation printer that omits the "std." prefix from the
static void printStandardCastOp(Operation *op, OpAsmPrinter &p) {
int stdDotLen = StandardOpsDialect::getDialectNamespace().size() + 1;
p << op->getName().getStringRef().drop_front(stdDotLen) << ' '
- << *op->getOperand(0) << " : " << op->getOperand(0)->getType() << " to "
- << op->getResult(0)->getType();
+ << op->getOperand(0) << " : " << op->getOperand(0).getType() << " to "
+ << op->getResult(0).getType();
}
/// A custom cast operation verifier.
template <typename T> static LogicalResult verifyCastOp(T op) {
- auto opType = op.getOperand()->getType();
+ auto opType = op.getOperand().getType();
auto resType = op.getType();
if (!T::areCastCompatible(opType, resType))
return op.emitError("operand type ") << opType << " and result type "
static LogicalResult foldMemRefCast(Operation *op) {
bool folded = false;
for (OpOperand &operand : op->getOpOperands()) {
- auto cast = dyn_cast_or_null<MemRefCastOp>(operand.get()->getDefiningOp());
- if (cast && !cast.getOperand()->getType().isa<UnrankedMemRefType>()) {
+ auto cast = dyn_cast_or_null<MemRefCastOp>(operand.get().getDefiningOp());
+ if (cast && !cast.getOperand().getType().isa<UnrankedMemRefType>()) {
operand.set(cast.getOperand());
folded = true;
}
}
static LogicalResult verify(AllocOp op) {
- auto memRefType = op.getResult()->getType().dyn_cast<MemRefType>();
+ auto memRefType = op.getResult().getType().dyn_cast<MemRefType>();
if (!memRefType)
return op.emitOpError("result must be a memref");
newShapeConstants.push_back(dimSize);
continue;
}
- auto *defOp = alloc.getOperand(dynamicDimPos)->getDefiningOp();
+ auto *defOp = alloc.getOperand(dynamicDimPos).getDefiningOp();
if (auto constantIndexOp = dyn_cast_or_null<ConstantIndexOp>(defOp)) {
// Dynamic shape dimension will be folded.
newShapeConstants.push_back(constantIndexOp.getValue());
return op.emitOpError("incorrect number of operands for callee");
for (unsigned i = 0, e = fnType.getNumInputs(); i != e; ++i)
- if (op.getOperand(i)->getType() != fnType.getInput(i))
+ if (op.getOperand(i).getType() != fnType.getInput(i))
return op.emitOpError("operand type mismatch");
if (fnType.getNumResults() != op.getNumResults())
return op.emitOpError("incorrect number of results for callee");
for (unsigned i = 0, e = fnType.getNumResults(); i != e; ++i)
- if (op.getResult(i)->getType() != fnType.getResult(i))
+ if (op.getResult(i).getType() != fnType.getResult(i))
return op.emitOpError("result type mismatch");
return success();
static void print(OpAsmPrinter &p, CallIndirectOp op) {
p << "call_indirect " << op.getCallee() << '(' << op.getArgOperands() << ')';
p.printOptionalAttrDict(op.getAttrs(), /*elidedAttrs=*/{"callee"});
- p << " : " << op.getCallee()->getType();
+ p << " : " << op.getCallee().getType();
}
static LogicalResult verify(CallIndirectOp op) {
// The callee must be a function.
- auto fnType = op.getCallee()->getType().dyn_cast<FunctionType>();
+ auto fnType = op.getCallee().getType().dyn_cast<FunctionType>();
if (!fnType)
return op.emitOpError("callee must have function type");
return op.emitOpError("incorrect number of operands for callee");
for (unsigned i = 0, e = fnType.getNumInputs(); i != e; ++i)
- if (op.getOperand(i + 1)->getType() != fnType.getInput(i))
+ if (op.getOperand(i + 1).getType() != fnType.getInput(i))
return op.emitOpError("operand type mismatch");
if (fnType.getNumResults() != op.getNumResults())
return op.emitOpError("incorrect number of results for callee");
for (unsigned i = 0, e = fnType.getNumResults(); i != e; ++i)
- if (op.getResult(i)->getType() != fnType.getResult(i))
+ if (op.getResult(i).getType() != fnType.getResult(i))
return op.emitOpError("result type mismatch");
return success();
static void buildCmpIOp(Builder *build, OperationState &result,
CmpIPredicate predicate, Value lhs, Value rhs) {
result.addOperands({lhs, rhs});
- result.types.push_back(getI1SameShape(build, lhs->getType()));
+ result.types.push_back(getI1SameShape(build, lhs.getType()));
result.addAttribute(
CmpIOp::getPredicateAttrName(),
build->getI64IntegerAttr(static_cast<int64_t>(predicate)));
<< '"' << ", " << op.lhs() << ", " << op.rhs();
p.printOptionalAttrDict(op.getAttrs(),
/*elidedAttrs=*/{CmpIOp::getPredicateAttrName()});
- p << " : " << op.lhs()->getType();
+ p << " : " << op.lhs().getType();
}
// Compute `lhs` `pred` `rhs`, where `pred` is one of the known integer
static void buildCmpFOp(Builder *build, OperationState &result,
CmpFPredicate predicate, Value lhs, Value rhs) {
result.addOperands({lhs, rhs});
- result.types.push_back(getI1SameShape(build, lhs->getType()));
+ result.types.push_back(getI1SameShape(build, lhs.getType()));
result.addAttribute(
CmpFOp::getPredicateAttrName(),
build->getI64IntegerAttr(static_cast<int64_t>(predicate)));
<< ", " << op.rhs();
p.printOptionalAttrDict(op.getAttrs(),
/*elidedAttrs=*/{CmpFOp::getPredicateAttrName()});
- p << " : " << op.lhs()->getType();
+ p << " : " << op.lhs().getType();
}
static LogicalResult verify(CmpFOp op) {
}
bool ConstantFloatOp::classof(Operation *op) {
- return ConstantOp::classof(op) &&
- op->getResult(0)->getType().isa<FloatType>();
+ return ConstantOp::classof(op) && op->getResult(0).getType().isa<FloatType>();
}
/// ConstantIntOp only matches values whose result type is an IntegerType.
bool ConstantIntOp::classof(Operation *op) {
return ConstantOp::classof(op) &&
- op->getResult(0)->getType().isa<IntegerType>();
+ op->getResult(0).getType().isa<IntegerType>();
}
void ConstantIntOp::build(Builder *builder, OperationState &result,
/// ConstantIndexOp only matches values whose result type is Index.
bool ConstantIndexOp::classof(Operation *op) {
- return ConstantOp::classof(op) && op->getResult(0)->getType().isIndex();
+ return ConstantOp::classof(op) && op->getResult(0).getType().isIndex();
}
void ConstantIndexOp::build(Builder *builder, OperationState &result,
PatternRewriter &rewriter) const override {
// Check that the memref operand's defining operation is an AllocOp.
Value memref = dealloc.memref();
- if (!isa_and_nonnull<AllocOp>(memref->getDefiningOp()))
+ if (!isa_and_nonnull<AllocOp>(memref.getDefiningOp()))
return matchFailure();
// Check that all of the uses of the AllocOp are other DeallocOps.
- for (auto *user : memref->getUsers())
+ for (auto *user : memref.getUsers())
if (!isa<DeallocOp>(user))
return matchFailure();
} // end anonymous namespace.
static void print(OpAsmPrinter &p, DeallocOp op) {
- p << "dealloc " << *op.memref() << " : " << op.memref()->getType();
+ p << "dealloc " << op.memref() << " : " << op.memref().getType();
}
static ParseResult parseDeallocOp(OpAsmParser &parser, OperationState &result) {
}
static LogicalResult verify(DeallocOp op) {
- if (!op.memref()->getType().isa<MemRefType>())
+ if (!op.memref().getType().isa<MemRefType>())
return op.emitOpError("operand must be a memref");
return success();
}
//===----------------------------------------------------------------------===//
static void print(OpAsmPrinter &p, DimOp op) {
- p << "dim " << *op.getOperand() << ", " << op.getIndex();
+ p << "dim " << op.getOperand() << ", " << op.getIndex();
p.printOptionalAttrDict(op.getAttrs(), /*elidedAttrs=*/{"index"});
- p << " : " << op.getOperand()->getType();
+ p << " : " << op.getOperand().getType();
}
static ParseResult parseDimOp(OpAsmParser &parser, OperationState &result) {
return op.emitOpError("requires an integer attribute named 'index'");
int64_t index = indexAttr.getValue().getSExtValue();
- auto type = op.getOperand()->getType();
+ auto type = op.getOperand().getType();
if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
if (index >= tensorType.getRank())
return op.emitOpError("index is out of range");
OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
// Constant fold dim when the size along the index referred to is a constant.
- auto opType = memrefOrTensor()->getType();
+ auto opType = memrefOrTensor().getType();
int64_t indexSize = -1;
if (auto tensorType = opType.dyn_cast<RankedTensorType>())
indexSize = tensorType.getShape()[getIndex()];
return {};
// The size at getIndex() is now a dynamic size of a memref.
- auto memref = memrefOrTensor()->getDefiningOp();
+ auto memref = memrefOrTensor().getDefiningOp();
if (auto alloc = dyn_cast_or_null<AllocOp>(memref))
return *(alloc.getDynamicSizes().begin() +
memrefType.getDynamicDimIndex(getIndex()));
}
void DmaStartOp::print(OpAsmPrinter &p) {
- p << "dma_start " << *getSrcMemRef() << '[' << getSrcIndices() << "], "
- << *getDstMemRef() << '[' << getDstIndices() << "], " << *getNumElements()
- << ", " << *getTagMemRef() << '[' << getTagIndices() << ']';
+ p << "dma_start " << getSrcMemRef() << '[' << getSrcIndices() << "], "
+ << getDstMemRef() << '[' << getDstIndices() << "], " << getNumElements()
+ << ", " << getTagMemRef() << '[' << getTagIndices() << ']';
if (isStrided())
- p << ", " << *getStride() << ", " << *getNumElementsPerStride();
+ p << ", " << getStride() << ", " << getNumElementsPerStride();
p.printOptionalAttrDict(getAttrs());
- p << " : " << getSrcMemRef()->getType();
- p << ", " << getDstMemRef()->getType();
- p << ", " << getTagMemRef()->getType();
+ p << " : " << getSrcMemRef().getType() << ", " << getDstMemRef().getType()
+ << ", " << getTagMemRef().getType();
}
// Parse DmaStartOp.
p << "dma_wait " << getTagMemRef() << '[' << getTagIndices() << "], "
<< getNumElements();
p.printOptionalAttrDict(getAttrs());
- p << " : " << getTagMemRef()->getType();
+ p << " : " << getTagMemRef().getType();
}
// Parse DmaWaitOp.
//===----------------------------------------------------------------------===//
static void print(OpAsmPrinter &p, ExtractElementOp op) {
- p << "extract_element " << *op.getAggregate() << '[' << op.getIndices();
+ p << "extract_element " << op.getAggregate() << '[' << op.getIndices();
p << ']';
p.printOptionalAttrDict(op.getAttrs());
- p << " : " << op.getAggregate()->getType();
+ p << " : " << op.getAggregate().getType();
}
static ParseResult parseExtractElementOp(OpAsmParser &parser,
}
static LogicalResult verify(ExtractElementOp op) {
- auto aggregateType = op.getAggregate()->getType().cast<ShapedType>();
+ auto aggregateType = op.getAggregate().getType().cast<ShapedType>();
// This should be possible with tablegen type constraints
if (op.getType() != aggregateType.getElementType())
//===----------------------------------------------------------------------===//
static void print(OpAsmPrinter &p, LoadOp op) {
- p << "load " << *op.getMemRef() << '[' << op.getIndices() << ']';
+ p << "load " << op.getMemRef() << '[' << op.getIndices() << ']';
p.printOptionalAttrDict(op.getAttrs());
p << " : " << op.getMemRefType();
}
//===----------------------------------------------------------------------===//
static void print(OpAsmPrinter &p, PrefetchOp op) {
- p << PrefetchOp::getOperationName() << " " << *op.memref() << '[';
+ p << PrefetchOp::getOperationName() << " " << op.memref() << '[';
p.printOperands(op.indices());
p << ']' << ", " << (op.isWrite() ? "write" : "read");
p << ", locality<" << op.localityHint();
//===----------------------------------------------------------------------===//
static void print(OpAsmPrinter &p, RankOp op) {
- p << "rank " << *op.getOperand() << " : " << op.getOperand()->getType();
+ p << "rank " << op.getOperand() << " : " << op.getOperand().getType();
}
static ParseResult parseRankOp(OpAsmParser &parser, OperationState &result) {
OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
// Constant fold rank when the rank of the tensor is known.
- auto type = getOperand()->getType();
+ auto type = getOperand().getType();
if (auto tensorType = type.dyn_cast<RankedTensorType>())
return IntegerAttr::get(IndexType::get(getContext()), tensorType.getRank());
return IntegerAttr();
<< " operands, but enclosing function returns " << results.size();
for (unsigned i = 0, e = results.size(); i != e; ++i)
- if (op.getOperand(i)->getType() != results[i])
+ if (op.getOperand(i).getType() != results[i])
return op.emitError()
<< "type of return operand " << i << " ("
- << op.getOperand(i)->getType()
+ << op.getOperand(i).getType()
<< ") doesn't match function result type (" << results[i] << ")";
return success();
}
static void print(OpAsmPrinter &p, SelectOp op) {
- p << "select " << op.getOperands() << " : " << op.getTrueValue()->getType();
+ p << "select " << op.getOperands() << " : " << op.getTrueValue().getType();
p.printOptionalAttrDict(op.getAttrs());
}
static LogicalResult verify(SelectOp op) {
- auto trueType = op.getTrueValue()->getType();
- auto falseType = op.getFalseValue()->getType();
+ auto trueType = op.getTrueValue().getType();
+ auto falseType = op.getFalseValue().getType();
if (trueType != falseType)
return op.emitOpError(
static LogicalResult verify(SignExtendIOp op) {
// Get the scalar type (which is either directly the type of the operand
// or the vector's/tensor's element type.
- auto srcType = getElementTypeOrSelf(op.getOperand()->getType());
+ auto srcType = getElementTypeOrSelf(op.getOperand().getType());
auto dstType = getElementTypeOrSelf(op.getType());
// For now, index is forbidden for the source and the destination type.
//===----------------------------------------------------------------------===//
static void print(OpAsmPrinter &p, SplatOp op) {
- p << "splat " << *op.getOperand();
+ p << "splat " << op.getOperand();
p.printOptionalAttrDict(op.getAttrs());
p << " : " << op.getType();
}
static LogicalResult verify(SplatOp op) {
// TODO: we could replace this by a trait.
- if (op.getOperand()->getType() !=
+ if (op.getOperand().getType() !=
op.getType().cast<ShapedType>().getElementType())
return op.emitError("operand should be of elemental type of result type");
//===----------------------------------------------------------------------===//
static void print(OpAsmPrinter &p, StoreOp op) {
- p << "store " << *op.getValueToStore();
- p << ", " << *op.getMemRef() << '[' << op.getIndices() << ']';
+ p << "store " << op.getValueToStore();
+ p << ", " << op.getMemRef() << '[' << op.getIndices() << ']';
p.printOptionalAttrDict(op.getAttrs());
p << " : " << op.getMemRefType();
}
static LogicalResult verify(StoreOp op) {
// First operand must have same type as memref element type.
- if (op.getValueToStore()->getType() != op.getMemRefType().getElementType())
+ if (op.getValueToStore().getType() != op.getMemRefType().getElementType())
return op.emitOpError(
"first operand must have same type memref element type");
//===----------------------------------------------------------------------===//
static void print(OpAsmPrinter &p, TensorLoadOp op) {
- p << "tensor_load " << *op.getOperand();
+ p << "tensor_load " << op.getOperand();
p.printOptionalAttrDict(op.getAttrs());
- p << " : " << op.getOperand()->getType();
+ p << " : " << op.getOperand().getType();
}
static ParseResult parseTensorLoadOp(OpAsmParser &parser,
//===----------------------------------------------------------------------===//
static void print(OpAsmPrinter &p, TensorStoreOp op) {
- p << "tensor_store " << *op.tensor() << ", " << *op.memref();
+ p << "tensor_store " << op.tensor() << ", " << op.memref();
p.printOptionalAttrDict(op.getAttrs());
- p << " : " << op.memref()->getType();
+ p << " : " << op.memref().getType();
}
static ParseResult parseTensorStoreOp(OpAsmParser &parser,
//===----------------------------------------------------------------------===//
static LogicalResult verify(TruncateIOp op) {
- auto srcType = getElementTypeOrSelf(op.getOperand()->getType());
+ auto srcType = getElementTypeOrSelf(op.getOperand().getType());
auto dstType = getElementTypeOrSelf(op.getType());
if (srcType.isa<IndexType>())
}
static void print(OpAsmPrinter &p, ViewOp op) {
- p << op.getOperationName() << ' ' << *op.getOperand(0) << '[';
+ p << op.getOperationName() << ' ' << op.getOperand(0) << '[';
auto dynamicOffset = op.getDynamicOffset();
if (dynamicOffset != nullptr)
p.printOperand(dynamicOffset);
p << "][" << op.getDynamicSizes() << ']';
p.printOptionalAttrDict(op.getAttrs());
- p << " : " << op.getOperand(0)->getType() << " to " << op.getType();
+ p << " : " << op.getOperand(0).getType() << " to " << op.getType();
}
Value ViewOp::getDynamicOffset() {
}
static LogicalResult verify(ViewOp op) {
- auto baseType = op.getOperand(0)->getType().cast<MemRefType>();
- auto viewType = op.getResult()->getType().cast<MemRefType>();
+ auto baseType = op.getOperand(0).getType().cast<MemRefType>();
+ auto viewType = op.getResult().getType().cast<MemRefType>();
// The base memref should have identity layout map (or none).
if (baseType.getAffineMaps().size() > 1 ||
int64_t newOffset = oldOffset;
unsigned dynamicOffsetOperandCount = 0;
if (dynamicOffset != nullptr) {
- auto *defOp = dynamicOffset->getDefiningOp();
+ auto *defOp = dynamicOffset.getDefiningOp();
if (auto constantIndexOp = dyn_cast_or_null<ConstantIndexOp>(defOp)) {
// Dynamic offset will be folded into the map.
newOffset = constantIndexOp.getValue();
newShapeConstants.push_back(dimSize);
continue;
}
- auto *defOp = viewOp.getOperand(dynamicDimPos)->getDefiningOp();
+ auto *defOp = viewOp.getOperand(dynamicDimPos).getDefiningOp();
if (auto constantIndexOp = dyn_cast_or_null<ConstantIndexOp>(defOp)) {
// Dynamic shape dimension will be folded.
newShapeConstants.push_back(constantIndexOp.getValue());
ValueRange strides, Type resultType,
ArrayRef<NamedAttribute> attrs) {
if (!resultType)
- resultType = inferSubViewResultType(source->getType().cast<MemRefType>());
+ resultType = inferSubViewResultType(source.getType().cast<MemRefType>());
auto segmentAttr = b->getI32VectorAttr(
{1, static_cast<int>(offsets.size()), static_cast<int32_t>(sizes.size()),
static_cast<int32_t>(strides.size())});
}
static void print(OpAsmPrinter &p, SubViewOp op) {
- p << op.getOperationName() << ' ' << *op.getOperand(0) << '[' << op.offsets()
+ p << op.getOperationName() << ' ' << op.getOperand(0) << '[' << op.offsets()
<< "][" << op.sizes() << "][" << op.strides() << ']';
SmallVector<StringRef, 1> elidedAttrs = {
SubViewOp::getOperandSegmentSizeAttr()};
p.printOptionalAttrDict(op.getAttrs(), elidedAttrs);
- p << " : " << op.getOperand(0)->getType() << " to " << op.getType();
+ p << " : " << op.getOperand(0).getType() << " to " << op.getType();
}
static LogicalResult verify(SubViewOp op) {
}
raw_ostream &mlir::operator<<(raw_ostream &os, SubViewOp::Range &range) {
- return os << "range " << *range.offset << ":" << *range.size << ":"
- << *range.stride;
+ return os << "range " << range.offset << ":" << range.size << ":"
+ << range.stride;
}
SmallVector<SubViewOp::Range, 8> SubViewOp::getRanges() {
}
SmallVector<int64_t, 4> staticShape(subViewOp.getNumSizes());
for (auto size : llvm::enumerate(subViewOp.sizes())) {
- auto defOp = size.value()->getDefiningOp();
+ auto defOp = size.value().getDefiningOp();
assert(defOp);
staticShape[size.index()] = cast<ConstantIndexOp>(defOp).getValue();
}
SmallVector<int64_t, 4> staticStrides(subViewOp.getNumStrides());
for (auto stride : llvm::enumerate(subViewOp.strides())) {
- auto defOp = stride.value()->getDefiningOp();
+ auto defOp = stride.value().getDefiningOp();
assert(defOp);
assert(baseStrides[stride.index()] > 0);
staticStrides[stride.index()] =
auto staticOffset = baseOffset;
for (auto offset : llvm::enumerate(subViewOp.offsets())) {
- auto defOp = offset.value()->getDefiningOp();
+ auto defOp = offset.value().getDefiningOp();
assert(defOp);
assert(baseStrides[offset.index()] > 0);
staticOffset +=
//===----------------------------------------------------------------------===//
static LogicalResult verify(ZeroExtendIOp op) {
- auto srcType = getElementTypeOrSelf(op.getOperand()->getType());
+ auto srcType = getElementTypeOrSelf(op.getOperand().getType());
auto dstType = getElementTypeOrSelf(op.getType());
if (srcType.isa<IndexType>())
assert(op->getNumResults() == 1 &&
"only support broadcast check on one result");
- auto type1 = op->getOperand(0)->getType();
- auto type2 = op->getOperand(1)->getType();
- auto retType = op->getResult(0)->getType();
+ auto type1 = op->getOperand(0).getType();
+ auto type2 = op->getOperand(1).getType();
+ auto retType = op->getResult(0).getType();
// We forbid broadcasting vector and tensor.
if (hasBothVectorAndTensorType({type1, type2, retType}))
ArrayAttr indexingMaps,
ArrayAttr iteratorTypes) {
result.addOperands({lhs, rhs, acc});
- result.addTypes(acc->getType());
+ result.addTypes(acc.getType());
result.addAttribute(getIndexingMapsAttrName(), indexingMaps);
result.addAttribute(getIteratorTypesAttrName(), iteratorTypes);
}
attrs.push_back(attr);
auto dictAttr = DictionaryAttr::get(attrs, op.getContext());
- p << op.getOperationName() << " " << dictAttr << " " << *op.lhs() << ", ";
- p << *op.rhs() << ", " << *op.acc();
+ p << op.getOperationName() << " " << dictAttr << " " << op.lhs() << ", ";
+ p << op.rhs() << ", " << op.acc();
if (op.masks().size() == 2)
p << ", " << op.masks();
p.printOptionalAttrDict(op.getAttrs(), attrNames);
- p << " : " << op.lhs()->getType() << ", " << op.rhs()->getType() << " into "
+ p << " : " << op.lhs().getType() << ", " << op.rhs().getType() << " into "
<< op.getResultType();
}
if (map.getNumDims() != numIterators)
return op.emitOpError("expected indexing map ")
<< index << " to have " << numIterators << " number of inputs";
- auto operandType = op.getOperand(index)->getType().cast<VectorType>();
+ auto operandType = op.getOperand(index).getType().cast<VectorType>();
unsigned rank = operandType.getShape().size();
if (map.getNumResults() != rank)
return op.emitOpError("expected indexing map ")
//===----------------------------------------------------------------------===//
static void print(OpAsmPrinter &p, vector::ExtractElementOp op) {
- p << op.getOperationName() << " " << *op.vector() << "[" << *op.position()
- << " : " << op.position()->getType() << "]";
+ p << op.getOperationName() << " " << op.vector() << "[" << op.position()
+ << " : " << op.position().getType() << "]";
p.printOptionalAttrDict(op.getAttrs());
- p << " : " << op.vector()->getType();
+ p << " : " << op.vector().getType();
}
static ParseResult parseExtractElementOp(OpAsmParser &parser,
Value source, ArrayRef<int64_t> position) {
result.addOperands(source);
auto positionAttr = getVectorSubscriptAttr(*builder, position);
- result.addTypes(inferExtractOpResultType(source->getType().cast<VectorType>(),
+ result.addTypes(inferExtractOpResultType(source.getType().cast<VectorType>(),
positionAttr));
result.addAttribute(getPositionAttrName(), positionAttr);
}
static void print(OpAsmPrinter &p, vector::ExtractOp op) {
- p << op.getOperationName() << " " << *op.vector() << op.position();
+ p << op.getOperationName() << " " << op.vector() << op.position();
p.printOptionalAttrDict(op.getAttrs(), {"position"});
- p << " : " << op.vector()->getType();
+ p << " : " << op.vector().getType();
}
static ParseResult parseExtractOp(OpAsmParser &parser, OperationState &result) {
}
static void print(OpAsmPrinter &p, ExtractSlicesOp op) {
- p << op.getOperationName() << ' ' << *op.vector() << ", ";
+ p << op.getOperationName() << ' ' << op.vector() << ", ";
p << op.sizes() << ", " << op.strides();
p.printOptionalAttrDict(
op.getAttrs(),
/*elidedAttrs=*/{ExtractSlicesOp::getSizesAttrName(),
ExtractSlicesOp::getStridesAttrName()});
- p << " : " << op.vector()->getType();
+ p << " : " << op.vector().getType();
p << " into " << op.getResultTupleType();
}
//===----------------------------------------------------------------------===//
static void print(OpAsmPrinter &p, BroadcastOp op) {
- p << op.getOperationName() << " " << *op.source() << " : "
+ p << op.getOperationName() << " " << op.source() << " : "
<< op.getSourceType() << " to " << op.getVectorType();
}
Value v2, ArrayRef<int64_t> mask) {
result.addOperands({v1, v2});
auto maskAttr = getVectorSubscriptAttr(*builder, mask);
- result.addTypes(v1->getType());
+ result.addTypes(v1.getType());
result.addAttribute(getMaskAttrName(), maskAttr);
}
static void print(OpAsmPrinter &p, ShuffleOp op) {
- p << op.getOperationName() << " " << *op.v1() << ", " << *op.v2() << " "
+ p << op.getOperationName() << " " << op.v1() << ", " << op.v2() << " "
<< op.mask();
p.printOptionalAttrDict(op.getAttrs(), {ShuffleOp::getMaskAttrName()});
- p << " : " << op.v1()->getType() << ", " << op.v2()->getType();
+ p << " : " << op.v1().getType() << ", " << op.v2().getType();
}
static LogicalResult verify(ShuffleOp op) {
//===----------------------------------------------------------------------===//
static void print(OpAsmPrinter &p, InsertElementOp op) {
- p << op.getOperationName() << " " << *op.source() << ", " << *op.dest() << "["
- << *op.position() << " : " << op.position()->getType() << "]";
+ p << op.getOperationName() << " " << op.source() << ", " << op.dest() << "["
+ << op.position() << " : " << op.position().getType() << "]";
p.printOptionalAttrDict(op.getAttrs());
- p << " : " << op.dest()->getType();
+ p << " : " << op.dest().getType();
}
static ParseResult parseInsertElementOp(OpAsmParser &parser,
Value dest, ArrayRef<int64_t> position) {
result.addOperands({source, dest});
auto positionAttr = getVectorSubscriptAttr(*builder, position);
- result.addTypes(dest->getType());
+ result.addTypes(dest.getType());
result.addAttribute(getPositionAttrName(), positionAttr);
}
static void print(OpAsmPrinter &p, InsertOp op) {
- p << op.getOperationName() << " " << *op.source() << ", " << *op.dest()
+ p << op.getOperationName() << " " << op.source() << ", " << op.dest()
<< op.position();
p.printOptionalAttrDict(op.getAttrs(), {InsertOp::getPositionAttrName()});
p << " : " << op.getSourceType() << " into " << op.getDestVectorType();
}
static void print(OpAsmPrinter &p, InsertSlicesOp op) {
- p << op.getOperationName() << ' ' << *op.vectors() << ", ";
+ p << op.getOperationName() << ' ' << op.vectors() << ", ";
p << op.sizes() << ", " << op.strides();
p.printOptionalAttrDict(
op.getAttrs(),
/*elidedAttrs=*/{InsertSlicesOp::getSizesAttrName(),
InsertSlicesOp::getStridesAttrName()});
- p << " : " << op.vectors()->getType();
+ p << " : " << op.vectors().getType();
p << " into " << op.getResultVectorType();
}
result.addOperands({source, dest});
auto offsetsAttr = getVectorSubscriptAttr(*builder, offsets);
auto stridesAttr = getVectorSubscriptAttr(*builder, strides);
- result.addTypes(dest->getType());
+ result.addTypes(dest.getType());
result.addAttribute(getOffsetsAttrName(), offsetsAttr);
result.addAttribute(getStridesAttrName(), stridesAttr);
}
static void print(OpAsmPrinter &p, InsertStridedSliceOp op) {
- p << op.getOperationName() << " " << *op.source() << ", " << *op.dest()
- << " ";
+ p << op.getOperationName() << " " << op.source() << ", " << op.dest() << " ";
p.printOptionalAttrDict(op.getAttrs());
p << " : " << op.getSourceVectorType() << " into " << op.getDestVectorType();
}
//===----------------------------------------------------------------------===//
static void print(OpAsmPrinter &p, OuterProductOp op) {
- p << op.getOperationName() << " " << *op.lhs() << ", " << *op.rhs();
+ p << op.getOperationName() << " " << op.lhs() << ", " << op.rhs();
if (!op.acc().empty())
p << ", " << op.acc();
- p << " : " << op.lhs()->getType() << ", " << op.rhs()->getType();
+ p << " : " << op.lhs().getType() << ", " << op.rhs().getType();
}
static ParseResult parseOuterProductOp(OpAsmParser &parser,
//===----------------------------------------------------------------------===//
static void print(OpAsmPrinter &p, ReshapeOp op) {
- p << op.getOperationName() << " " << *op.vector() << ", [" << op.input_shape()
+ p << op.getOperationName() << " " << op.vector() << ", [" << op.input_shape()
<< "], [" << op.output_shape() << "], " << op.fixed_vector_sizes();
SmallVector<StringRef, 2> elidedAttrs = {
ReshapeOp::getOperandSegmentSizeAttr(),
// If all shape operands are produced by constant ops, verify that product
// of dimensions for input/output shape match.
auto isDefByConstant = [](Value operand) {
- return isa_and_nonnull<ConstantIndexOp>(operand->getDefiningOp());
+ return isa_and_nonnull<ConstantIndexOp>(operand.getDefiningOp());
};
if (llvm::all_of(op.input_shape(), isDefByConstant) &&
llvm::all_of(op.output_shape(), isDefByConstant)) {
int64_t numInputElements = 1;
for (auto operand : op.input_shape())
numInputElements *=
- cast<ConstantIndexOp>(operand->getDefiningOp()).getValue();
+ cast<ConstantIndexOp>(operand.getDefiningOp()).getValue();
int64_t numOutputElements = 1;
for (auto operand : op.output_shape())
numOutputElements *=
- cast<ConstantIndexOp>(operand->getDefiningOp()).getValue();
+ cast<ConstantIndexOp>(operand.getDefiningOp()).getValue();
if (numInputElements != numOutputElements)
return op.emitError("product of input and output shape sizes must match");
}
auto sizesAttr = getVectorSubscriptAttr(*builder, sizes);
auto stridesAttr = getVectorSubscriptAttr(*builder, strides);
result.addTypes(
- inferStridedSliceOpResultType(source->getType().cast<VectorType>(),
+ inferStridedSliceOpResultType(source.getType().cast<VectorType>(),
offsetsAttr, sizesAttr, stridesAttr));
result.addAttribute(getOffsetsAttrName(), offsetsAttr);
result.addAttribute(getSizesAttrName(), sizesAttr);
}
static void print(OpAsmPrinter &p, StridedSliceOp op) {
- p << op.getOperationName() << " " << *op.vector();
+ p << op.getOperationName() << " " << op.vector();
p.printOptionalAttrDict(op.getAttrs());
- p << " : " << op.vector()->getType() << " to " << op.getResult()->getType();
+ p << " : " << op.vector().getType() << " to " << op.getResult().getType();
}
static ParseResult parseStridedSliceOp(OpAsmParser &parser,
auto resultType = inferStridedSliceOpResultType(
op.getVectorType(), op.offsets(), op.sizes(), op.strides());
- if (op.getResult()->getType() != resultType) {
+ if (op.getResult().getType() != resultType) {
op.emitOpError("expected result type to be ") << resultType;
return failure();
}
PatternMatchResult matchAndRewrite(StridedSliceOp stridedSliceOp,
PatternRewriter &rewriter) const override {
// Return if 'stridedSliceOp' operand is not defined by a ConstantMaskOp.
- auto defOp = stridedSliceOp.vector()->getDefiningOp();
+ auto defOp = stridedSliceOp.vector().getDefiningOp();
auto constantMaskOp = dyn_cast_or_null<ConstantMaskOp>(defOp);
if (!constantMaskOp)
return matchFailure();
// Replace 'stridedSliceOp' with ConstantMaskOp with sliced mask region.
rewriter.replaceOpWithNewOp<ConstantMaskOp>(
- stridedSliceOp, stridedSliceOp.getResult()->getType(),
+ stridedSliceOp, stridedSliceOp.getResult().getType(),
vector::getVectorSubscriptAttr(rewriter, sliceMaskDimSizes));
return matchSuccess();
}
// Consistency of elemental types in memref and vector.
MemRefType memrefType = op.getMemRefType();
VectorType vectorType = op.getVectorType();
- auto paddingType = op.padding()->getType();
+ auto paddingType = op.padding().getType();
auto permutationMap = op.permutation_map();
auto memrefElementType = memrefType.getElementType();
// TransferWriteOp
//===----------------------------------------------------------------------===//
static void print(OpAsmPrinter &p, TransferWriteOp op) {
- p << op.getOperationName() << " " << *op.vector() << ", " << *op.memref()
- << "[" << op.indices() << "]";
+ p << op.getOperationName() << " " << op.vector() << ", " << op.memref() << "["
+ << op.indices() << "]";
p.printOptionalAttrDict(op.getAttrs());
p << " : " << op.getVectorType() << ", " << op.getMemRefType();
}
void TypeCastOp::build(Builder *builder, OperationState &result, Value source) {
result.addOperands(source);
result.addTypes(
- inferVectorTypeCastResultType(source->getType().cast<MemRefType>()));
+ inferVectorTypeCastResultType(source.getType().cast<MemRefType>()));
}
static void print(OpAsmPrinter &p, TypeCastOp op) {
- auto type = op.getOperand()->getType().cast<MemRefType>();
- p << op.getOperationName() << ' ' << *op.memref() << " : " << type << " to "
+ auto type = op.getOperand().getType().cast<MemRefType>();
+ p << op.getOperationName() << ' ' << op.memref() << " : " << type << " to "
<< inferVectorTypeCastResultType(type);
}
}
static void print(OpAsmPrinter &p, TupleGetOp op) {
- p << op.getOperationName() << ' ' << *op.getOperand() << ", " << op.index();
+ p << op.getOperationName() << ' ' << op.getOperand() << ", " << op.index();
p.printOptionalAttrDict(op.getAttrs(),
/*elidedAttrs=*/{TupleGetOp::getIndexAttrName()});
- p << " : " << op.getOperand()->getType();
+ p << " : " << op.getOperand().getType();
}
static LogicalResult verify(TupleGetOp op) {
- auto tupleType = op.getOperand()->getType().cast<TupleType>();
+ auto tupleType = op.getOperand().getType().cast<TupleType>();
if (op.getIndex() < 0 ||
op.getIndex() >= static_cast<int64_t>(tupleType.size()))
return op.emitOpError("tuple get index out of range");
static void print(OpAsmPrinter &p, ConstantMaskOp op) {
p << op.getOperationName() << ' ' << op.mask_dim_sizes() << " : "
- << op.getResult()->getType();
+ << op.getResult().getType();
}
static LogicalResult verify(ConstantMaskOp &op) {
// Verify that array attr size matches the rank of the vector result.
- auto resultType = op.getResult()->getType().cast<VectorType>();
+ auto resultType = op.getResult().getType().cast<VectorType>();
if (static_cast<int64_t>(op.mask_dim_sizes().size()) != resultType.getRank())
return op.emitOpError(
"must specify array attr of size equal vector result rank");
static LogicalResult verify(CreateMaskOp op) {
// Verify that an operand was specified for each result vector each dimension.
if (op.getNumOperands() !=
- op.getResult()->getType().cast<VectorType>().getRank())
+ op.getResult().getType().cast<VectorType>().getRank())
return op.emitOpError(
"must specify an operand for each result vector dimension");
return success();
}
static void print(OpAsmPrinter &p, PrintOp op) {
- p << op.getOperationName() << ' ' << *op.source() << " : "
+ p << op.getOperationName() << ' ' << op.source() << " : "
<< op.getPrintType();
}
PatternRewriter &rewriter) const override {
// Return if any of 'createMaskOp' operands are not defined by a constant.
auto is_not_def_by_constant = [](Value operand) {
- return !isa_and_nonnull<ConstantIndexOp>(operand->getDefiningOp());
+ return !isa_and_nonnull<ConstantIndexOp>(operand.getDefiningOp());
};
if (llvm::any_of(createMaskOp.operands(), is_not_def_by_constant))
return matchFailure();
// Gather constant mask dimension sizes.
SmallVector<int64_t, 4> maskDimSizes;
for (auto operand : createMaskOp.operands()) {
- auto defOp = operand->getDefiningOp();
+ auto defOp = operand.getDefiningOp();
maskDimSizes.push_back(cast<ConstantIndexOp>(defOp).getValue());
}
// Replace 'createMaskOp' with ConstantMaskOp.
rewriter.replaceOpWithNewOp<ConstantMaskOp>(
- createMaskOp, createMaskOp.getResult()->getType(),
+ createMaskOp, createMaskOp.getResult().getType(),
vector::getVectorSubscriptAttr(rewriter, maskDimSizes));
return matchSuccess();
}
auto tupleType =
generateExtractSlicesOpResultType(vectorType, sizes, strides, builder);
state.slicesTuple = builder.create<vector::ExtractSlicesOp>(
- initValue->getLoc(), tupleType, initValue, sizes, strides);
+ initValue.getLoc(), tupleType, initValue, sizes, strides);
}
}
if (valueSlice == nullptr) {
// Return tuple element at 'sliceLinearIndex'.
auto tupleIndex = builder.getI64IntegerAttr(sliceLinearIndex);
- auto initValueType = initValue->getType().cast<VectorType>();
+ auto initValueType = initValue.getType().cast<VectorType>();
auto vectorType =
VectorType::get(state.unrolledShape, initValueType.getElementType());
// Initialize 'cache' with slice from 'initValue'.
unsigned resultIndex,
ArrayRef<int64_t> targetShape,
PatternRewriter &builder) {
- auto shapedType = op->getResult(0)->getType().dyn_cast_or_null<ShapedType>();
+ auto shapedType = op->getResult(0).getType().dyn_cast_or_null<ShapedType>();
if (!shapedType || !shapedType.hasStaticShape())
assert(false && "Expected a statically shaped result type");
SmallVector<Type, 4> vectorTupleTypes(resultValueState.numInstances);
SmallVector<Value, 4> vectorTupleValues(resultValueState.numInstances);
for (unsigned i = 0; i < resultValueState.numInstances; ++i) {
- vectorTupleTypes[i] = caches[resultIndex][i]->getType().cast<VectorType>();
+ vectorTupleTypes[i] = caches[resultIndex][i].getType().cast<VectorType>();
vectorTupleValues[i] = caches[resultIndex][i];
}
TupleType tupleType = builder.getTupleType(vectorTupleTypes);
vectorTupleValues);
// Create InsertSlicesOp(Tuple(result_vectors)).
- auto resultVectorType = op->getResult(0)->getType().cast<VectorType>();
+ auto resultVectorType = op->getResult(0).getType().cast<VectorType>();
SmallVector<int64_t, 4> sizes(resultValueState.unrolledShape);
SmallVector<int64_t, 4> strides(resultValueState.unrollFactors.size(), 1);
vectors.resize(numIterators);
unsigned accOperandIndex = vector::ContractionOp::getAccOperandIndex();
for (unsigned i = 0; i < numIterators; ++i) {
- vectors[i].type = contractionOp.getOperand(i)->getType().cast<VectorType>();
+ vectors[i].type = contractionOp.getOperand(i).getType().cast<VectorType>();
vectors[i].indexMap = iterationIndexMapList[i];
vectors[i].operandIndex = i;
vectors[i].isAcc = i == accOperandIndex ? true : false;
std::vector<VectorState> &vectors,
unsigned &resultIndex) {
// Verify that operation and operands all have the same vector shape.
- auto resultType = op->getResult(0)->getType().dyn_cast_or_null<VectorType>();
+ auto resultType = op->getResult(0).getType().dyn_cast_or_null<VectorType>();
assert(resultType && "Expected op with vector result type");
auto resultShape = resultType.getShape();
// Verify that all operands have the same vector type as result.
getAffineConstantExpr(offsets[it.index()], ctx);
auto map = AffineMap::get(/*dimCount=*/1, /*symbolCount=*/0, expr);
sliceIndices[it.index()] = rewriter.create<AffineApplyOp>(
- it.value()->getLoc(), map, ArrayRef<Value>(it.value()));
+ it.value().getLoc(), map, ArrayRef<Value>(it.value()));
}
// Call 'fn' to generate slice 'i' at 'sliceIndices'.
fn(i, sliceIndices);
// Return unless the unique 'xferReadOp' user is an ExtractSlicesOp.
Value xferReadResult = xferReadOp.getResult();
auto extractSlicesOp =
- dyn_cast<vector::ExtractSlicesOp>(*xferReadResult->getUsers().begin());
- if (!xferReadResult->hasOneUse() || !extractSlicesOp)
+ dyn_cast<vector::ExtractSlicesOp>(*xferReadResult.getUsers().begin());
+ if (!xferReadResult.hasOneUse() || !extractSlicesOp)
return matchFailure();
// Get 'sizes' and 'strides' parameters from ExtractSlicesOp user.
if (!xferWriteOp.permutation_map().isIdentity())
return matchFailure();
// Return unless the 'xferWriteOp' 'vector' operand is an 'InsertSlicesOp'.
- auto *vectorDefOp = xferWriteOp.vector()->getDefiningOp();
+ auto *vectorDefOp = xferWriteOp.vector().getDefiningOp();
auto insertSlicesOp = dyn_cast_or_null<vector::InsertSlicesOp>(vectorDefOp);
if (!insertSlicesOp)
return matchFailure();
// Get TupleOp operand of 'insertSlicesOp'.
auto tupleOp = dyn_cast_or_null<vector::TupleOp>(
- insertSlicesOp.vectors()->getDefiningOp());
+ insertSlicesOp.vectors().getDefiningOp());
if (!tupleOp)
return matchFailure();
PatternRewriter &rewriter) const override {
// Return if 'tupleGetOp.vectors' arg was not defined by ExtractSlicesOp.
auto extractSlicesOp = dyn_cast_or_null<vector::ExtractSlicesOp>(
- tupleGetOp.vectors()->getDefiningOp());
+ tupleGetOp.vectors().getDefiningOp());
if (!extractSlicesOp)
return matchFailure();
// Return if 'extractSlicesOp.vector' arg was not defined by InsertSlicesOp.
auto insertSlicesOp = dyn_cast_or_null<vector::InsertSlicesOp>(
- extractSlicesOp.vector()->getDefiningOp());
+ extractSlicesOp.vector().getDefiningOp());
if (!insertSlicesOp)
return matchFailure();
// Return if 'insertSlicesOp.vectors' arg was not defined by TupleOp.
auto tupleOp = dyn_cast_or_null<vector::TupleOp>(
- insertSlicesOp.vectors()->getDefiningOp());
+ insertSlicesOp.vectors().getDefiningOp());
if (!tupleOp)
return matchFailure();
auto &b = ScopedContext::getBuilder();
auto loc = ScopedContext::getLocation();
v = b.create<ConstantIndexOp>(loc, cst.v).getResult();
- t = v->getType();
+ t = v.getType();
}
ValueHandle &mlir::edsc::ValueHandle::operator=(const ValueHandle &other) {
if (lbs.size() != 1 || ubs.size() != 1)
return Optional<ValueHandle>();
- auto *lbDef = lbs.front().getValue()->getDefiningOp();
- auto *ubDef = ubs.front().getValue()->getDefiningOp();
+ auto *lbDef = lbs.front().getValue().getDefiningOp();
+ auto *ubDef = ubs.front().getValue().getDefiningOp();
if (!lbDef || !ubDef)
return Optional<ValueHandle>();
unsigned &numSymbols) {
AffineExpr d;
Value resultVal = nullptr;
- if (auto constant = dyn_cast_or_null<ConstantIndexOp>(val->getDefiningOp())) {
+ if (auto constant = dyn_cast_or_null<ConstantIndexOp>(val.getDefiningOp())) {
d = getAffineConstantExpr(constant.getValue(), context);
} else if (isValidSymbol(val) && !isValidDim(val)) {
d = getAffineSymbolExpr(numSymbols++, context);
static ValueHandle createBinaryHandle(
ValueHandle lhs, ValueHandle rhs,
function_ref<AffineExpr(AffineExpr, AffineExpr)> affCombiner) {
- auto thisType = lhs.getValue()->getType();
- auto thatType = rhs.getValue()->getType();
+ auto thisType = lhs.getValue().getType();
+ auto thatType = rhs.getValue().getType();
assert(thisType == thatType && "cannot mix types in operators");
(void)thisType;
(void)thatType;
using namespace mlir::edsc;
static SmallVector<ValueHandle, 8> getMemRefSizes(Value memRef) {
- MemRefType memRefType = memRef->getType().cast<MemRefType>();
+ MemRefType memRefType = memRef.getType().cast<MemRefType>();
assert(isStrided(memRefType) && "Expected strided MemRef type");
SmallVector<ValueHandle, 8> res;
}
mlir::edsc::MemRefView::MemRefView(Value v) : base(v) {
- assert(v->getType().isa<MemRefType>() && "MemRefType expected");
+ assert(v.getType().isa<MemRefType>() && "MemRefType expected");
auto memrefSizeValues = getMemRefSizes(v);
for (auto &size : memrefSizeValues) {
}
mlir::edsc::VectorView::VectorView(Value v) : base(v) {
- auto vectorType = v->getType().cast<VectorType>();
+ auto vectorType = v.getType().cast<VectorType>();
for (auto s : vectorType.getShape()) {
lbs.push_back(static_cast<index_t>(0));
for (auto ®ion : op->getRegions())
for (auto &block : region)
for (auto arg : block.getArguments())
- visitType(arg->getType());
+ visitType(arg.getType());
// Visit each of the attributes.
for (auto elt : op->getAttrs())
DialectInterfaceCollection<OpAsmDialectInterface> &interfaces) {
auto setArgNameFn = [&](Value arg, StringRef name) {
assert(!valueIDs.count(arg) && "arg numbered multiple times");
- assert(arg.cast<BlockArgument>()->getOwner() == &block &&
+ assert(arg.cast<BlockArgument>().getOwner() == &block &&
"arg not defined in 'block'");
setValueName(arg, name);
};
SmallVector<int, 2> resultGroups(/*Size=*/1, /*Value=*/0);
auto setResultNameFn = [&](Value result, StringRef name) {
assert(!valueIDs.count(result) && "result numbered multiple times");
- assert(result->getDefiningOp() == &op && "result not defined by 'op'");
+ assert(result.getDefiningOp() == &op && "result not defined by 'op'");
setValueName(result, name);
// Record the result number for groups not anchored at 0.
- if (int resultNo = result.cast<OpResult>()->getResultNumber())
+ if (int resultNo = result.cast<OpResult>().getResultNumber())
resultGroups.push_back(resultNo);
};
if (OpAsmOpInterface asmInterface = dyn_cast<OpAsmOpInterface>(&op))
void SSANameState::getResultIDAndNumber(OpResult result, Value &lookupValue,
Optional<int> &lookupResultNo) const {
- Operation *owner = result->getOwner();
+ Operation *owner = result.getOwner();
if (owner->getNumResults() == 1)
return;
- int resultNo = result->getResultNumber();
+ int resultNo = result.getResultNumber();
// If this operation has multiple result groups, we will need to find the
// one corresponding to this result.
interleaveComma(block->getArguments(), [&](BlockArgument arg) {
printValueID(arg);
os << ": ";
- printType(arg->getType());
+ printType(arg.getType());
});
os << ')';
}
[this](Value operand) { printValueID(operand); });
os << " : ";
interleaveComma(succOperands,
- [this](Value operand) { printType(operand->getType()); });
+ [this](Value operand) { printType(operand.getType()); });
os << ')';
}
void Block::dropAllDefinedValueUses() {
for (auto arg : getArguments())
- arg->dropAllUses();
+ arg.dropAllUses();
for (auto &op : *this)
op.dropAllDefinedValueUses();
dropAllUses();
// Ask the dialect to materialize a constant operation for this value.
Attribute attr = it.value().get<Attribute>();
auto *constOp = dialect->materializeConstant(
- cstBuilder, attr, op->getResult(it.index())->getType(), op->getLoc());
+ cstBuilder, attr, op->getResult(it.index()).getType(), op->getLoc());
if (!constOp) {
// Erase any generated constants.
for (Operation *cst : generatedConstants)
auto fnInputTypes = getType().getInputs();
Block &entryBlock = front();
for (unsigned i = 0, e = entryBlock.getNumArguments(); i != e; ++i)
- if (fnInputTypes[i] != entryBlock.getArgument(i)->getType())
+ if (fnInputTypes[i] != entryBlock.getArgument(i).getType())
return emitOpError("type of entry block argument #")
- << i << '(' << entryBlock.getArgument(i)->getType()
+ << i << '(' << entryBlock.getArgument(i).getType()
<< ") must match the type of the corresponding argument in "
<< "function signature(" << fnInputTypes[i] << ')';
if (nOperands < 2)
return success();
- auto type = op->getOperand(0)->getType();
+ auto type = op->getOperand(0).getType();
for (auto opType : llvm::drop_begin(op->getOperandTypes(), 1))
if (opType != type)
return op->emitOpError() << "requires all operands to have the same type";
if (failed(verifyAtLeastNOperands(op, 1)))
return failure();
- auto type = op->getOperand(0)->getType();
+ auto type = op->getOperand(0).getType();
for (auto opType : llvm::drop_begin(op->getOperandTypes(), 1)) {
if (failed(verifyCompatibleShape(opType, type)))
return op->emitOpError() << "requires the same shape for all operands";
failed(verifyAtLeastNResults(op, 1)))
return failure();
- auto type = op->getOperand(0)->getType();
+ auto type = op->getOperand(0).getType();
for (auto resultType : op->getResultTypes()) {
if (failed(verifyCompatibleShape(resultType, type)))
return op->emitOpError()
failed(verifyAtLeastNResults(op, 1)))
return failure();
- auto type = op->getResult(0)->getType();
+ auto type = op->getResult(0).getType();
auto elementType = getElementTypeOrSelf(type);
for (auto resultType : llvm::drop_begin(op->getResultTypes(), 1)) {
if (getElementTypeOrSelf(resultType) != elementType ||
auto operandIt = operands.begin();
for (unsigned i = 0, e = operandCount; i != e; ++i, ++operandIt) {
- if ((*operandIt)->getType() != destBB->getArgument(i)->getType())
+ if ((*operandIt).getType() != destBB->getArgument(i).getType())
return op->emitError() << "type mismatch for bb argument #" << i
<< " of successor #" << succNo;
}
void impl::buildBinaryOp(Builder *builder, OperationState &result, Value lhs,
Value rhs) {
- assert(lhs->getType() == rhs->getType());
+ assert(lhs.getType() == rhs.getType());
result.addOperands({lhs, rhs});
- result.types.push_back(lhs->getType());
+ result.types.push_back(lhs.getType());
}
ParseResult impl::parseOneResultSameOperandTypeOp(OpAsmParser &parser,
// If not all the operand and result types are the same, just use the
// generic assembly form to avoid omitting information in printing.
- auto resultType = op->getResult(0)->getType();
+ auto resultType = op->getResult(0).getType();
if (llvm::any_of(op->getOperandTypes(),
[&](Type type) { return type != resultType; })) {
p.printGenericOp(op);
}
void impl::printCastOp(Operation *op, OpAsmPrinter &p) {
- p << op->getName() << ' ' << *op->getOperand(0);
+ p << op->getName() << ' ' << op->getOperand(0);
p.printOptionalAttrDict(op->getAttrs());
- p << " : " << op->getOperand(0)->getType() << " to "
- << op->getResult(0)->getType();
+ p << " : " << op->getOperand(0).getType() << " to "
+ << op->getResult(0).getType();
}
Value impl::foldCastOp(Operation *op) {
// Identity cast
- if (op->getOperand(0)->getType() == op->getResult(0)->getType())
+ if (op->getOperand(0).getType() == op->getResult(0).getType())
return op->getOperand(0);
return nullptr;
}
// Replace all of the successor arguments with the provided values.
for (auto it : llvm::zip(source->getArguments(), argValues))
- std::get<0>(it)->replaceAllUsesWith(std::get<1>(it));
+ std::get<0>(it).replaceAllUsesWith(std::get<1>(it));
// Splice the operations of the 'source' block into the 'dest' block and erase
// it.
// argument to the cloned block.
for (auto arg : block.getArguments())
if (!mapper.contains(arg))
- mapper.map(arg, newBlock->addArgument(arg->getType()));
+ mapper.map(arg, newBlock->addArgument(arg.getType()));
// Clone and remap the operations within this block.
for (auto &op : block)
// Check that any value that is used by an operation is defined in the
// same region as either an operation result or a block argument.
- if (operand->getParentRegion()->isProperAncestor(&limit)) {
+ if (operand.getParentRegion()->isProperAncestor(&limit)) {
if (noteLoc) {
op.emitOpError("using value defined outside the region")
.attachNote(noteLoc)
}
Type mlir::getElementTypeOrSelf(Value val) {
- return getElementTypeOrSelf(val->getType());
+ return getElementTypeOrSelf(val.getType());
}
Type mlir::getElementTypeOrSelf(Attribute attr) {
it, &unwrap) {}
Type OperandElementTypeIterator::unwrap(Value value) {
- return value->getType().cast<ShapedType>().getElementType();
+ return value.getType().cast<ShapedType>().getElementType();
}
ResultElementTypeIterator::ResultElementTypeIterator(
it, &unwrap) {}
Type ResultElementTypeIterator::unwrap(Value value) {
- return value->getType().cast<ShapedType>().getElementType();
+ return value.getType().cast<ShapedType>().getElementType();
}
/// defines it.
Operation *Value::getDefiningOp() const {
if (auto result = dyn_cast<OpResult>())
- return result->getOwner();
+ return result.getOwner();
return nullptr;
}
-Location Value::getLoc() {
+Location Value::getLoc() const {
if (auto *op = getDefiningOp())
return op->getLoc();
return UnknownLoc::get(getContext());
Region *Value::getParentRegion() {
if (auto *op = getDefiningOp())
return op->getParentRegion();
- return cast<BlockArgument>()->getOwner()->getParent();
+ return cast<BlockArgument>().getOwner()->getParent();
}
//===----------------------------------------------------------------------===//
for (auto &fwd : forwardRefPlaceholders) {
// Drop all uses of undefined forward declared reference and destroy
// defining operation.
- fwd.first->dropAllUses();
- fwd.first->getDefiningOp()->destroy();
+ fwd.first.dropAllUses();
+ fwd.first.getDefiningOp()->destroy();
}
}
// If it was a forward reference, update everything that used it to use
// the actual definition instead, delete the forward ref, and remove it
// from our set of forward references we track.
- existing->replaceAllUsesWith(value);
- existing->getDefiningOp()->destroy();
+ existing.replaceAllUsesWith(value);
+ existing.getDefiningOp()->destroy();
forwardRefPlaceholders.erase(existing);
}
if (useInfo.number < entries.size() && entries[useInfo.number].first) {
auto result = entries[useInfo.number].first;
// Check that the type matches the other uses.
- if (result->getType() == type)
+ if (result.getType() == type)
return result;
emitError(useInfo.loc, "use of value '")
.append(useInfo.name,
"' expects different type than prior uses: ", type, " vs ",
- result->getType())
+ result.getType())
.attachNote(getEncodedSourceLocation(entries[useInfo.number].second))
.append("prior use here");
return nullptr;
// Finally, make sure the existing argument has the correct type.
auto arg = owner->getArgument(nextArgument++);
- if (arg->getType() != type)
+ if (arg.getType() != type)
return emitError("argument and block argument type mismatch");
return addDefinition(useInfo, arg);
});
void handleValueIdentity(Operation *op, CAGSlice &cag) const {
assert(op->getNumResults() == 1);
- if (!isHandledType(op->getResult(0)->getType()))
+ if (!isHandledType(op->getResult(0).getType()))
return;
auto resultNode = cag.getResultAnchor(op, 0);
CAGAnchorNode::TypeTransformRule::DirectStorage);
for (unsigned opIdx = 0, e = op->getNumOperands(); opIdx < e; ++opIdx) {
- if (!isHandledType(op->getOperand(opIdx)->getType()))
+ if (!isHandledType(op->getOperand(opIdx).getType()))
continue;
auto operandNode = cag.getOperandAnchor(op, opIdx);
operandNode->setTypeTransformRule(
}
void handleConstant(Operation *op, CAGSlice &cag) const {
- if (!isHandledType(op->getResult(0)->getType()))
+ if (!isHandledType(op->getResult(0).getType()))
return;
auto resultNode = cag.getResultAnchor(op, 0);
}
void handleTerminal(Operation *op, CAGSlice &cag) const {
- if (!isHandledType(op->getOperand(0)->getType()))
+ if (!isHandledType(op->getOperand(0).getType()))
return;
auto operandNode = cag.getOperandAnchor(op, 0);
operandNode->setTypeTransformRule(
}
void handleStats(Operation *op, CAGSlice &cag) const {
- if (!isHandledType(op->getResult(0)->getType()))
+ if (!isHandledType(op->getResult(0).getType()))
return;
auto argNode = cag.getOperandAnchor(op, 0);
}
void handleAdd(Operation *op, CAGSlice &cag) const {
- if (!isHandledType(op->getResult(0)->getType()))
+ if (!isHandledType(op->getResult(0).getType()))
return;
auto lhs = cag.getOperandAnchor(op, 0);
}
void handleMul(Operation *op, CAGSlice &cag) const {
- if (!isHandledType(op->getResult(0)->getType()))
+ if (!isHandledType(op->getResult(0).getType()))
return;
auto lhs = cag.getOperandAnchor(op, 0);
}
void handleMatMul(Operation *op, CAGSlice &cag) const {
- if (!isHandledType(op->getResult(0)->getType()))
+ if (!isHandledType(op->getResult(0).getType()))
return;
auto lhs = cag.getOperandAnchor(op, 0);
}
void handleMatMulBias(Operation *op, CAGSlice &cag) const {
- if (!isHandledType(op->getResult(0)->getType()))
+ if (!isHandledType(op->getResult(0).getType()))
return;
auto lhs = cag.getOperandAnchor(op, 0);
}
CAGOperandAnchor::CAGOperandAnchor(Operation *op, unsigned operandIdx)
- : CAGAnchorNode(Kind::OperandAnchor, op->getOperand(operandIdx)->getType()),
+ : CAGAnchorNode(Kind::OperandAnchor, op->getOperand(operandIdx).getType()),
op(op), operandIdx(operandIdx) {}
CAGResultAnchor::CAGResultAnchor(Operation *op, unsigned resultIdx)
- : CAGAnchorNode(Kind::ResultAnchor, op->getResult(resultIdx)->getType()),
+ : CAGAnchorNode(Kind::ResultAnchor, op->getResult(resultIdx).getType()),
resultValue(op->getResult(resultIdx)) {}
CAGSlice::CAGSlice(SolverContext &context) : context(context) {}
for (auto &resultAnchorPair : resultAnchors) {
CAGResultAnchor *resultAnchor = resultAnchorPair.second;
Value resultValue = resultAnchor->getValue();
- for (auto &use : resultValue->getUses()) {
+ for (auto &use : resultValue.getUses()) {
Operation *operandOp = use.getOwner();
unsigned operandIdx = use.getOperandNumber();
auto foundIt = operandAnchors.find(std::make_pair(operandOp, operandIdx));
// Insert stats for each argument.
for (auto arg : func.getArguments()) {
- if (!config.isHandledType(arg->getType()))
+ if (!config.isHandledType(arg.getType()))
continue;
OpBuilder b(func.getBody());
APFloat minValue(-1.0f);
RankedTensorType::get({2}, b.getF32Type()), {minValue, maxValue});
auto statsOp = b.create<StatisticsOp>(func.getLoc(), arg, layerStats,
nullptr, nullptr);
- arg->replaceAllUsesWith(statsOp);
+ arg.replaceAllUsesWith(statsOp);
// StatsOp contained a use to 'arg' so make sure to reset it after replacing
// all of the uses of 'arg'.
assert(op->getNumResults() == 1);
auto originalResult = op->getResult(0);
- if (!config.isHandledType(originalResult->getType()))
+ if (!config.isHandledType(originalResult.getType()))
return;
OpBuilder b(op->getBlock(), ++op->getIterator());
RankedTensorType::get({2}, b.getF32Type()), {minValue, maxValue});
auto statsOp = b.create<StatisticsOp>(op->getLoc(), op->getResult(0),
layerStats, nullptr, nullptr);
- originalResult->replaceAllUsesWith(statsOp);
+ originalResult.replaceAllUsesWith(statsOp);
// StatsOp contained a use to 'op' so make sure to reset it after replacing
// all of the uses of 'op'.
// bulk in the IR.
Value newTypedInputValue = inputValue;
auto inputDcastOp =
- dyn_cast_or_null<DequantizeCastOp>(inputValue->getDefiningOp());
- if (inputDcastOp && inputDcastOp.arg()->getType() == newType) {
+ dyn_cast_or_null<DequantizeCastOp>(inputValue.getDefiningOp());
+ if (inputDcastOp && inputDcastOp.arg().getType() == newType) {
// Can just use the dcast's input value.
newTypedInputValue = inputDcastOp.arg();
removeValuesIfDead.push_back(inputDcastOp);
}
for (Value removeValueIfDead : removeValuesIfDead) {
- if (removeValueIfDead->use_empty()) {
- removeValueIfDead->getDefiningOp()->erase();
+ if (removeValueIfDead.use_empty()) {
+ removeValueIfDead.getDefiningOp()->erase();
}
}
}
void InferQuantizedTypesPass::transformResultType(CAGResultAnchor *anchor,
Type newType) {
Value origResultValue = anchor->getValue();
- Operation *op = origResultValue->getDefiningOp();
+ Operation *op = origResultValue.getDefiningOp();
OpBuilder b(op->getBlock(), ++Block::iterator(op));
Value replacedResultValue = nullptr;
Value newResultValue = nullptr;
switch (anchor->getTypeTransformRule()) {
case CAGAnchorNode::TypeTransformRule::Direct:
- origResultValue->setType(newType);
+ origResultValue.setType(newType);
replacedResultValue = newResultValue = b.create<DequantizeCastOp>(
op->getLoc(), anchor->getOriginalType(), origResultValue);
break;
Type storageType = QuantizedType::castToStorageType(newType);
if (failed(validateTypeConversion(storageType, newType, op)))
return;
- origResultValue->setType(storageType);
+ origResultValue.setType(storageType);
replacedResultValue =
b.create<StorageCastOp>(op->getLoc(), newType, origResultValue);
newResultValue = b.create<DequantizeCastOp>(
// newResultValue -> [original uses]
// Note that replaceResultValue may equal newResultValue or there may
// be operands between the two.
- origResultValue->replaceAllUsesWith(newResultValue);
- replacedResultValue->getDefiningOp()->replaceUsesOfWith(newResultValue,
- origResultValue);
+ origResultValue.replaceAllUsesWith(newResultValue);
+ replacedResultValue.getDefiningOp()->replaceUsesOfWith(newResultValue,
+ origResultValue);
}
}
assert(instMap.count(llvmAndUnknown.first));
Value newValue = instMap[llvmAndUnknown.first];
Value oldValue = llvmAndUnknown.second->getResult(0);
- oldValue->replaceAllUsesWith(newValue);
+ oldValue.replaceAllUsesWith(newValue);
llvmAndUnknown.second->erase();
}
return success();
unsigned numPredecessors =
std::distance(predecessors.begin(), predecessors.end());
for (auto arg : bb.getArguments()) {
- auto wrappedType = arg->getType().dyn_cast<LLVM::LLVMType>();
+ auto wrappedType = arg.getType().dyn_cast<LLVM::LLVMType>();
if (!wrappedType)
return emitError(bb.front().getLoc(),
"block argument does not have an LLVM type");
if (auto attr = func.getArgAttrOfType<BoolAttr>(argIdx, "llvm.noalias")) {
// NB: Attribute already verified to be boolean, so check if we can indeed
// attach the attribute to this argument, based on its type.
- auto argTy = mlirArg->getType().dyn_cast<LLVM::LLVMType>();
+ auto argTy = mlirArg.getType().dyn_cast<LLVM::LLVMType>();
if (!argTy.getUnderlyingType()->isPointerTy())
return func.emitError(
"llvm.noalias attribute attached to LLVM non-pointer argument");
Value memref = isa<AffineLoadOp>(op)
? cast<AffineLoadOp>(op).getMemRef()
: cast<AffineStoreOp>(op).getMemRef();
- for (auto *user : memref->getUsers()) {
+ for (auto *user : memref.getUsers()) {
// If this memref has a user that is a DMA, give up because these
// operations write to this memref.
if (isa<AffineDmaStartOp>(op) || isa<AffineDmaWaitOp>(op)) {
return false;
}
for (unsigned int i = 0; i < op.getNumOperands(); ++i) {
- auto *operandSrc = op.getOperand(i)->getDefiningOp();
+ auto *operandSrc = op.getOperand(i).getDefiningOp();
LLVM_DEBUG(
- op.getOperand(i)->print(llvm::dbgs() << "\nIterating on operand\n"));
+ op.getOperand(i).print(llvm::dbgs() << "\nIterating on operand\n"));
// If the loop IV is the operand, this op isn't loop invariant.
if (indVar == op.getOperand(i)) {
// Drop all uses of the original arguments and delete the original block.
Block *origBlock = it->second.origBlock;
for (BlockArgument arg : origBlock->getArguments())
- arg->dropAllUses();
+ arg.dropAllUses();
conversionInfo.erase(it);
}
}
// Drop all uses of the new block arguments and replace uses of the new block.
for (int i = block->getNumArguments() - 1; i >= 0; --i)
- block->getArgument(i)->dropAllUses();
+ block->getArgument(i).dropAllUses();
block->replaceAllUsesWith(origBlock);
// Move the operations back the original block and the delete the new block.
// replace all uses.
auto argReplacementValue = mapping.lookupOrDefault(origArg);
if (argReplacementValue != origArg) {
- origArg->replaceAllUsesWith(argReplacementValue);
+ origArg.replaceAllUsesWith(argReplacementValue);
continue;
}
// If there are any dangling uses then replace the argument with one
// generated by the type converter. This is necessary as the cast must
// persist in the IR after conversion.
- if (!origArg->use_empty()) {
+ if (!origArg.use_empty()) {
rewriter.setInsertionPointToStart(newBlock);
auto *newOp = typeConverter->materializeConversion(
- rewriter, origArg->getType(), llvm::None, loc);
- origArg->replaceAllUsesWith(newOp->getResult(0));
+ rewriter, origArg.getType(), llvm::None, loc);
+ origArg.replaceAllUsesWith(newOp->getResult(0));
}
continue;
}
// type are the same, otherwise it should force a conversion to be
// materialized.
if (argInfo->newArgSize == 1) {
- origArg->replaceAllUsesWith(
+ origArg.replaceAllUsesWith(
mapping.lookupOrDefault(newBlock->getArgument(argInfo->newArgIdx)));
continue;
}
assert(argInfo->newArgSize > 1 && castValue && "expected 1->N mapping");
// If the argument is still used, replace it with the generated cast.
- if (!origArg->use_empty())
- origArg->replaceAllUsesWith(mapping.lookupOrDefault(castValue));
+ if (!origArg.use_empty())
+ origArg.replaceAllUsesWith(mapping.lookupOrDefault(castValue));
// If all users of the cast were removed, we can drop it. Otherwise, keep
// the operation alive and let the user handle any remaining usages.
- if (castValue->use_empty())
- castValue->getDefiningOp()->erase();
+ if (castValue.use_empty())
+ castValue.getDefiningOp()->erase();
}
}
}
// to pack the new values.
auto replArgs = newArgs.slice(inputMap->inputNo, inputMap->size);
Operation *cast = typeConverter->materializeConversion(
- rewriter, origArg->getType(), replArgs, loc);
+ rewriter, origArg.getType(), replArgs, loc);
assert(cast->getNumResults() == 1 &&
cast->getNumOperands() == replArgs.size());
mapping.map(origArg, cast->getResult(0));
for (auto &repl : replacements) {
for (unsigned i = 0, e = repl.newValues.size(); i != e; ++i) {
if (auto newValue = repl.newValues[i])
- repl.op->getResult(i)->replaceAllUsesWith(
+ repl.op->getResult(i).replaceAllUsesWith(
mapping.lookupOrDefault(newValue));
}
void ConversionPatternRewriter::replaceUsesOfBlockArgument(BlockArgument from,
Value to) {
- for (auto &u : from->getUses()) {
- if (u.getOwner() == to->getDefiningOp())
+ for (auto &u : from.getUses()) {
+ if (u.getOwner() == to.getDefiningOp())
continue;
u.getOwner()->replaceUsesOfWith(from, to);
}
-> Optional<SignatureConversion> {
SignatureConversion conversion(block->getNumArguments());
for (unsigned i = 0, e = block->getNumArguments(); i != e; ++i)
- if (failed(convertSignatureArg(i, block->getArgument(i)->getType(),
+ if (failed(convertSignatureArg(i, block->getArgument(i).getType(),
conversion)))
return llvm::None;
return conversion;
Node *node = getNode(id);
for (auto *storeOpInst : node->stores) {
auto memref = cast<AffineStoreOp>(storeOpInst).getMemRef();
- auto *op = memref->getDefiningOp();
+ auto *op = memref.getDefiningOp();
// Return true if 'memref' is a block argument.
if (!op)
return true;
// Return true if any use of 'memref' escapes the function.
- for (auto *user : memref->getUsers())
+ for (auto *user : memref.getUsers())
if (!isMemRefDereferencingOp(*user))
return true;
}
if (!hasEdge(srcId, dstId, value)) {
outEdges[srcId].push_back({dstId, value});
inEdges[dstId].push_back({srcId, value});
- if (value->getType().isa<MemRefType>())
+ if (value.getType().isa<MemRefType>())
memrefEdgeCount[value]++;
}
}
void removeEdge(unsigned srcId, unsigned dstId, Value value) {
assert(inEdges.count(dstId) > 0);
assert(outEdges.count(srcId) > 0);
- if (value->getType().isa<MemRefType>()) {
+ if (value.getType().isa<MemRefType>()) {
assert(memrefEdgeCount.count(value) > 0);
memrefEdgeCount[value]--;
}
const std::function<void(Edge)> &callback) {
for (auto &edge : edges) {
// Skip if 'edge' is not a memref dependence edge.
- if (!edge.value->getType().isa<MemRefType>())
+ if (!edge.value.getType().isa<MemRefType>())
continue;
assert(nodes.count(edge.id) > 0);
// Skip if 'edge.id' is not a loop nest.
continue;
auto *opInst = node.op;
for (auto value : opInst->getResults()) {
- for (auto *user : value->getUsers()) {
+ for (auto *user : value.getUsers()) {
SmallVector<AffineForOp, 4> loops;
getLoopIVs(*user, &loops);
if (loops.empty())
OpBuilder top(forInst->getParentOfType<FuncOp>().getBody());
// Create new memref type based on slice bounds.
auto oldMemRef = cast<AffineStoreOp>(srcStoreOpInst).getMemRef();
- auto oldMemRefType = oldMemRef->getType().cast<MemRefType>();
+ auto oldMemRefType = oldMemRef.getType().cast<MemRefType>();
unsigned rank = oldMemRefType.getRank();
// Compute MemRefRegion for 'srcStoreOpInst' at depth 'dstLoopDepth'.
visitedMemrefs.insert(newMemRef);
// Create new node in dependence graph for 'newMemRef' alloc op.
unsigned newMemRefNodeId =
- mdg->addNode(newMemRef->getDefiningOp());
+ mdg->addNode(newMemRef.getDefiningOp());
// Add edge from 'newMemRef' node to dstNode.
mdg->addEdge(newMemRefNodeId, dstId, newMemRef);
}
// Search for siblings which load the same memref function argument.
auto fn = dstNode->op->getParentOfType<FuncOp>();
for (unsigned i = 0, e = fn.getNumArguments(); i != e; ++i) {
- for (auto *user : fn.getArgument(i)->getUsers()) {
+ for (auto *user : fn.getArgument(i).getUsers()) {
if (auto loadOp = dyn_cast<AffineLoadOp>(user)) {
// Gather loops surrounding 'use'.
SmallVector<AffineForOp, 4> loops;
continue;
auto memref = pair.first;
// Skip if there exist other uses (return operation or function calls).
- if (!memref->use_empty())
+ if (!memref.use_empty())
continue;
// Use list expected to match the dep graph info.
- auto *op = memref->getDefiningOp();
+ auto *op = memref.getDefiningOp();
if (isa_and_nonnull<AllocOp>(op))
op->erase();
}
// Helper to check whether an operation is loop invariant wrt. SSA properties.
auto isDefinedOutsideOfBody = [&](Value value) {
- auto definingOp = value->getDefiningOp();
+ auto definingOp = value.getDefiningOp();
return (definingOp && !!willBeMovedSet.count(definingOp)) ||
looplike.isDefinedOutsideOfLoop(value);
};
constructTiledIndexSetHyperRect(origLoops, newLoops, tileSizes);
// In this case, the point loop IVs just replace the original ones.
for (unsigned i = 0; i < width; i++) {
- origLoopIVs[i]->replaceAllUsesWith(newLoops[i + width].getInductionVar());
+ origLoopIVs[i].replaceAllUsesWith(newLoops[i + width].getInductionVar());
}
// Erase the old loop nest.
// If the induction variable is used, create a remapping to the value for
// this unrolled instance.
- if (!forOpIV->use_empty()) {
+ if (!forOpIV.use_empty()) {
// iv' = iv + i, i = 1 to unrollJamFactor-1.
auto d0 = builder.getAffineDimExpr(0);
auto bumpMap = AffineMap::get(1, 0, {d0 + i * step});
// all store ops.
SmallVector<Operation *, 8> storeOps;
unsigned minSurroundingLoops = getNestingDepth(*loadOpInst);
- for (auto *user : loadOp.getMemRef()->getUsers()) {
+ for (auto *user : loadOp.getMemRef().getUsers()) {
auto storeOp = dyn_cast<AffineStoreOp>(user);
if (!storeOp)
continue;
// to do this as well, but we'll do it here since we collected these anyway.
for (auto memref : memrefsToErase) {
// If the memref hasn't been alloc'ed in this function, skip.
- Operation *defInst = memref->getDefiningOp();
+ Operation *defInst = memref.getDefiningOp();
if (!defInst || !isa<AllocOp>(defInst))
// TODO(mlir-team): if the memref was returned by a 'call' operation, we
// could still erase it if the call had no side-effects.
continue;
- if (llvm::any_of(memref->getUsers(), [&](Operation *ownerInst) {
+ if (llvm::any_of(memref.getUsers(), [&](Operation *ownerInst) {
return (!isa<AffineStoreOp>(ownerInst) && !isa<DeallocOp>(ownerInst));
}))
continue;
// Erase all stores, the dealloc, and the alloc on the memref.
- for (auto *user : llvm::make_early_inc_range(memref->getUsers()))
+ for (auto *user : llvm::make_early_inc_range(memref.getUsers()))
user->erase();
defInst->erase();
}
return newMemRefType;
};
- auto oldMemRefType = oldMemRef->getType().cast<MemRefType>();
+ auto oldMemRefType = oldMemRef.getType().cast<MemRefType>();
auto newMemRefType = doubleShape(oldMemRefType);
// The double buffer is allocated right before 'forInst'.
// We only double buffer if the buffer is not live out of loop.
auto memref = dmaStartOp.getOperand(dmaStartOp.getFasterMemPos());
bool escapingUses = false;
- for (auto *user : memref->getUsers()) {
+ for (auto *user : memref.getUsers()) {
// We can double buffer regardless of dealloc's outside the loop.
if (isa<DeallocOp>(user))
continue;
// order to create the double buffer above.)
// '-canonicalize' does this in a more general way, but we'll anyway do the
// simple/common case so that the output / test cases looks clear.
- if (auto *allocInst = oldMemRef->getDefiningOp()) {
- if (oldMemRef->use_empty()) {
+ if (auto *allocInst = oldMemRef.getDefiningOp()) {
+ if (oldMemRef.use_empty()) {
allocInst->erase();
- } else if (oldMemRef->hasOneUse()) {
- if (auto dealloc = dyn_cast<DeallocOp>(*oldMemRef->user_begin())) {
+ } else if (oldMemRef.hasOneUse()) {
+ if (auto dealloc = dyn_cast<DeallocOp>(*oldMemRef.user_begin())) {
dealloc.erase();
allocInst->erase();
}
}
// If the old tag has no uses or a single dealloc use, remove it.
// (canonicalization handles more complex cases).
- if (auto *tagAllocInst = oldTagMemRef->getDefiningOp()) {
- if (oldTagMemRef->use_empty()) {
+ if (auto *tagAllocInst = oldTagMemRef.getDefiningOp()) {
+ if (oldTagMemRef.use_empty()) {
tagAllocInst->erase();
- } else if (oldTagMemRef->hasOneUse()) {
- if (auto dealloc = dyn_cast<DeallocOp>(*oldTagMemRef->user_begin())) {
+ } else if (oldTagMemRef.hasOneUse()) {
+ if (auto dealloc = dyn_cast<DeallocOp>(*oldTagMemRef.user_begin())) {
dealloc.erase();
tagAllocInst->erase();
}
// Otherwise, replace all of the result values and erase the operation.
for (unsigned i = 0, e = results.size(); i != e; ++i)
- op->getResult(i)->replaceAllUsesWith(results[i]);
+ op->getResult(i).replaceAllUsesWith(results[i]);
op->erase();
return success();
}
auto &uniquedConstants = foldScopes[getInsertionRegion(interfaces, op)];
// Erase all of the references to this operation.
- auto type = op->getResult(0)->getType();
+ auto type = op->getResult(0).getType();
for (auto *dialect : it->second)
uniquedConstants.erase(std::make_tuple(dialect, constValue, type));
referencedDialects.erase(it);
Attribute attrRepl = foldResults[i].get<Attribute>();
if (auto *constOp =
tryGetOrCreateConstant(uniquedConstants, dialect, builder, attrRepl,
- res->getType(), op->getLoc())) {
+ res.getType(), op->getLoc())) {
results.push_back(constOp->getResult(0));
continue;
}
// before the root is changed.
void notifyRootReplaced(Operation *op) override {
for (auto result : op->getResults())
- for (auto *user : result->getUsers())
+ for (auto *user : result.getUsers())
addToWorklist(user);
}
// TODO(riverriddle) This is based on the fact that zero use operations
// may be deleted, and that single use values often have more
// canonicalization opportunities.
- if (!operand->use_empty() && !operand->hasOneUse())
+ if (!operand.use_empty() && !operand.hasOneUse())
continue;
- if (auto *defInst = operand->getDefiningOp())
+ if (auto *defInst = operand.getDefiningOp())
addToWorklist(defInst);
}
}
// Add all the users of the result to the worklist so we make sure
// to revisit them.
for (auto result : op->getResults())
- for (auto *operand : result->getUsers())
+ for (auto *operand : result.getUsers())
addToWorklist(operand);
notifyOperationRemoved(op);
// Otherwise, there were multiple blocks inlined. Add arguments to the post
// insertion block to represent the results to replace.
for (Value resultToRepl : resultsToReplace) {
- resultToRepl->replaceAllUsesWith(
- postInsertBlock->addArgument(resultToRepl->getType()));
+ resultToRepl.replaceAllUsesWith(
+ postInsertBlock->addArgument(resultToRepl.getType()));
}
/// Handle the terminators for each of the new blocks.
// Verify that the types of the provided values match the function argument
// types.
BlockArgument regionArg = entryBlock->getArgument(i);
- if (inlinedOperands[i]->getType() != regionArg->getType())
+ if (inlinedOperands[i].getType() != regionArg.getType())
return failure();
mapper.map(regionArg, inlinedOperands[i]);
}
// Functor used to cleanup generated state on failure.
auto cleanupState = [&] {
for (auto *op : castOps) {
- op->getResult(0)->replaceAllUsesWith(op->getOperand(0));
+ op->getResult(0).replaceAllUsesWith(op->getOperand(0));
op->erase();
}
return failure();
// If the call operand doesn't match the expected region argument, try to
// generate a cast.
- Type regionArgType = regionArg->getType();
- if (operand->getType() != regionArgType) {
+ Type regionArgType = regionArg.getType();
+ if (operand.getType() != regionArgType) {
if (!(operand = materializeConversion(callInterface, castOps, castBuilder,
operand, regionArgType, castLoc)))
return cleanupState();
castBuilder.setInsertionPointAfter(call);
for (unsigned i = 0, e = callResults.size(); i != e; ++i) {
Value callResult = callResults[i];
- if (callResult->getType() == callableResultTypes[i])
+ if (callResult.getType() == callableResultTypes[i])
continue;
// Generate a conversion that will produce the original type, so that the IR
// is still valid after the original call gets replaced.
Value castResult =
materializeConversion(callInterface, castOps, castBuilder, callResult,
- callResult->getType(), castLoc);
+ callResult.getType(), castLoc);
if (!castResult)
return cleanupState();
- callResult->replaceAllUsesWith(castResult);
- castResult->getDefiningOp()->replaceUsesOfWith(castResult, callResult);
+ callResult.replaceAllUsesWith(castResult);
+ castResult.getDefiningOp()->replaceUsesOfWith(castResult, callResult);
}
// Attempt to inline the call.
return WalkResult::advance();
}
for (auto value : op->getResults()) {
- for (auto user : value->getUsers()) {
+ for (auto user : value.getUsers()) {
SmallVector<AffineForOp, 4> loops;
// Check if any loop in loop nest surrounding 'user' is 'opB'.
getLoopIVs(*user, &loops);
// Subtract out any load users of 'storeMemrefs' nested below
// 'insertPointParent'.
for (auto value : storeMemrefs) {
- for (auto *user : value->getUsers()) {
+ for (auto *user : value.getUsers()) {
if (auto loadOp = dyn_cast<AffineLoadOp>(user)) {
SmallVector<AffineForOp, 4> loops;
// Check if any loop in loop nest surrounding 'user' is
canonicalizeMapAndOperands(map, operands);
// Remove any affine.apply's that became dead from the simplification above.
for (auto v : bumpValues) {
- if (v->use_empty()) {
- v->getDefiningOp()->erase();
- }
+ if (v.use_empty())
+ v.getDefiningOp()->erase();
}
if (lb.use_empty())
lb.erase();
// Replaces all IV uses to its single iteration value.
auto iv = forOp.getInductionVar();
Operation *op = forOp.getOperation();
- if (!iv->use_empty()) {
+ if (!iv.use_empty()) {
if (forOp.hasConstantLowerBound()) {
OpBuilder topBuilder(op->getParentOfType<FuncOp>().getBody());
auto constOp = topBuilder.create<ConstantIndexOp>(
forOp.getLoc(), forOp.getConstantLowerBound());
- iv->replaceAllUsesWith(constOp);
+ iv.replaceAllUsesWith(constOp);
} else {
AffineBound lb = forOp.getLowerBound();
SmallVector<Value, 4> lbOperands(lb.operand_begin(), lb.operand_end());
OpBuilder builder(op->getBlock(), Block::iterator(op));
if (lb.getMap() == builder.getDimIdentityMap()) {
// No need of generating an affine.apply.
- iv->replaceAllUsesWith(lbOperands[0]);
+ iv.replaceAllUsesWith(lbOperands[0]);
} else {
auto affineApplyOp = builder.create<AffineApplyOp>(
op->getLoc(), lb.getMap(), lbOperands);
- iv->replaceAllUsesWith(affineApplyOp);
+ iv.replaceAllUsesWith(affineApplyOp);
}
}
}
// remapped to results of cloned operations, and their IV used remapped.
// Generate the remapping if the shift is not zero: remappedIV = newIV -
// shift.
- if (!srcIV->use_empty() && shift != 0) {
+ if (!srcIV.use_empty() && shift != 0) {
auto ivRemap = bodyBuilder.create<AffineApplyOp>(
srcForInst.getLoc(),
bodyBuilder.getSingleDimShiftAffineMap(
// If the induction variable is used, create a remapping to the value for
// this unrolled instance.
- if (!forOpIV->use_empty()) {
+ if (!forOpIV.use_empty()) {
// iv' = iv + 1/2/3...unrollFactor-1;
auto d0 = builder.getAffineDimExpr(0);
auto bumpMap = AffineMap::get(1, 0, {d0 + i * step});
static Value ceilDivPositive(OpBuilder &builder, Location loc, Value dividend,
int64_t divisor) {
assert(divisor > 0 && "expected positive divisor");
- assert(dividend->getType().isIndex() && "expected index-typed value");
+ assert(dividend.getType().isIndex() && "expected index-typed value");
Value divisorMinusOneCst = builder.create<ConstantIndexOp>(loc, divisor - 1);
Value divisorCst = builder.create<ConstantIndexOp>(loc, divisor);
// where divis is rounding-to-zero division.
static Value ceilDivPositive(OpBuilder &builder, Location loc, Value dividend,
Value divisor) {
- assert(dividend->getType().isIndex() && "expected index-typed value");
+ assert(dividend.getType().isIndex() && "expected index-typed value");
Value cstOne = builder.create<ConstantIndexOp>(loc, 1);
Value divisorMinusOne = builder.create<SubIOp>(loc, divisor, cstOne);
static void
replaceAllUsesExcept(Value orig, Value replacement,
const SmallPtrSetImpl<Operation *> &exceptions) {
- for (auto &use : llvm::make_early_inc_range(orig->getUses())) {
+ for (auto &use : llvm::make_early_inc_range(orig.getUses())) {
if (exceptions.count(use.getOwner()) == 0)
use.set(replacement);
}
// a constant one step.
bool isZeroBased = false;
if (auto ubCst =
- dyn_cast_or_null<ConstantIndexOp>(loop.lowerBound()->getDefiningOp()))
+ dyn_cast_or_null<ConstantIndexOp>(loop.lowerBound().getDefiningOp()))
isZeroBased = ubCst.getValue() == 0;
bool isStepOne = false;
if (auto stepCst =
- dyn_cast_or_null<ConstantIndexOp>(loop.step()->getDefiningOp()))
+ dyn_cast_or_null<ConstantIndexOp>(loop.step().getDefiningOp()))
isStepOne = stepCst.getValue() == 1;
if (isZeroBased && isStepOne)
Value shifted =
isZeroBased ? scaled : builder.create<AddIOp>(loc, scaled, lb);
- SmallPtrSet<Operation *, 2> preserve{scaled->getDefiningOp(),
- shifted->getDefiningOp()};
+ SmallPtrSet<Operation *, 2> preserve{scaled.getDefiningOp(),
+ shifted.getDefiningOp()};
replaceAllUsesExcept(loop.getInductionVar(), shifted, preserve);
}
int64_t numEltPerStride = 1;
int64_t stride = 1;
for (int d = bufferShape.size() - 1; d >= 1; d--) {
- int64_t dimSize = region.memref->getType().cast<MemRefType>().getDimSize(d);
+ int64_t dimSize = region.memref.getType().cast<MemRefType>().getDimSize(d);
stride *= dimSize;
numEltPerStride *= bufferShape[d];
// A stride is needed only if the region has a shorter extent than the
auto loc = region.loc;
auto memref = region.memref;
- auto memRefType = memref->getType().cast<MemRefType>();
+ auto memRefType = memref.getType().cast<MemRefType>();
auto layoutMaps = memRefType.getAffineMaps();
if (layoutMaps.size() > 1 ||
assert(false && "expected load or store op");
return false;
}
- auto memRefType = region->memref->getType().cast<MemRefType>();
+ auto memRefType = region->memref.getType().cast<MemRefType>();
if (!memRefType.hasStaticShape())
return false;
void mlir::replaceAllUsesInRegionWith(Value orig, Value replacement,
Region ®ion) {
- for (auto &use : llvm::make_early_inc_range(orig->getUses())) {
+ for (auto &use : llvm::make_early_inc_range(orig.getUses())) {
if (region.isAncestor(use.getOwner()->getParentRegion()))
use.set(replacement);
}
region.walk([callback, &properAncestors](Operation *op) {
for (OpOperand &operand : op->getOpOperands())
// Callback on values defined in a proper ancestor of region.
- if (properAncestors.count(operand.get()->getParentRegion()))
+ if (properAncestors.count(operand.get().getParentRegion()))
callback(&operand);
});
}
}
static void processValue(Value value, LiveMap &liveMap) {
- bool provedLive = llvm::any_of(value->getUses(), [&](OpOperand &use) {
+ bool provedLive = llvm::any_of(value.getUses(), [&](OpOperand &use) {
if (isUseSpeciallyKnownDead(use, liveMap))
return false;
return liveMap.wasProvenLive(use.getOwner());
AffineMap indexRemap,
ArrayRef<Value> extraOperands,
ArrayRef<Value> symbolOperands) {
- unsigned newMemRefRank = newMemRef->getType().cast<MemRefType>().getRank();
+ unsigned newMemRefRank = newMemRef.getType().cast<MemRefType>().getRank();
(void)newMemRefRank; // unused in opt mode
- unsigned oldMemRefRank = oldMemRef->getType().cast<MemRefType>().getRank();
+ unsigned oldMemRefRank = oldMemRef.getType().cast<MemRefType>().getRank();
(void)oldMemRefRank; // unused in opt mode
if (indexRemap) {
assert(indexRemap.getNumSymbols() == symbolOperands.size() &&
}
// Assert same elemental type.
- assert(oldMemRef->getType().cast<MemRefType>().getElementType() ==
- newMemRef->getType().cast<MemRefType>().getElementType());
+ assert(oldMemRef.getType().cast<MemRefType>().getElementType() ==
+ newMemRef.getType().cast<MemRefType>().getElementType());
if (!isMemRefDereferencingOp(*op))
// Failure: memref used in a non-dereferencing context (potentially
// Prepend 'extraIndices' in 'newMapOperands'.
for (auto extraIndex : extraIndices) {
- assert(extraIndex->getDefiningOp()->getNumResults() == 1 &&
+ assert(extraIndex.getDefiningOp()->getNumResults() == 1 &&
"single result op's expected to generate these indices");
assert((isValidDim(extraIndex) || isValidSymbol(extraIndex)) &&
"invalid memory op index");
canonicalizeMapAndOperands(&newMap, &newMapOperands);
// Remove any affine.apply's that became dead as a result of composition.
for (auto value : affineApplyOps)
- if (value->use_empty())
- value->getDefiningOp()->erase();
+ if (value.use_empty())
+ value.getDefiningOp()->erase();
// Construct the new operation using this memref.
OperationState state(op->getLoc(), op->getName());
// Result types don't change. Both memref's are of the same elemental type.
state.types.reserve(op->getNumResults());
for (auto result : op->getResults())
- state.types.push_back(result->getType());
+ state.types.push_back(result.getType());
// Add attribute for 'newMap', other Attributes do not change.
auto newMapAttr = AffineMapAttr::get(newMap);
ArrayRef<Value> symbolOperands,
Operation *domInstFilter,
Operation *postDomInstFilter) {
- unsigned newMemRefRank = newMemRef->getType().cast<MemRefType>().getRank();
+ unsigned newMemRefRank = newMemRef.getType().cast<MemRefType>().getRank();
(void)newMemRefRank; // unused in opt mode
- unsigned oldMemRefRank = oldMemRef->getType().cast<MemRefType>().getRank();
+ unsigned oldMemRefRank = oldMemRef.getType().cast<MemRefType>().getRank();
(void)oldMemRefRank;
if (indexRemap) {
assert(indexRemap.getNumSymbols() == symbolOperands.size() &&
}
// Assert same elemental type.
- assert(oldMemRef->getType().cast<MemRefType>().getElementType() ==
- newMemRef->getType().cast<MemRefType>().getElementType());
+ assert(oldMemRef.getType().cast<MemRefType>().getElementType() ==
+ newMemRef.getType().cast<MemRefType>().getElementType());
std::unique_ptr<DominanceInfo> domInfo;
std::unique_ptr<PostDominanceInfo> postDomInfo;
// DenseSet since an operation could potentially have multiple uses of a
// memref (although rare), and the replacement later is going to erase ops.
DenseSet<Operation *> opsToReplace;
- for (auto *op : oldMemRef->getUsers()) {
+ for (auto *op : oldMemRef.getUsers()) {
// Skip this use if it's not dominated by domInstFilter.
if (domInstFilter && !domInfo->dominates(domInstFilter, op))
continue;
SmallVector<Value, 4> subOperands;
subOperands.reserve(opInst->getNumOperands());
for (auto operand : opInst->getOperands())
- if (isa_and_nonnull<AffineApplyOp>(operand->getDefiningOp()))
+ if (isa_and_nonnull<AffineApplyOp>(operand.getDefiningOp()))
subOperands.push_back(operand);
// Gather sequence of AffineApplyOps reachable from 'subOperands'.
bool localized = true;
for (auto *op : affineApplyOps) {
for (auto result : op->getResults()) {
- for (auto *user : result->getUsers()) {
+ for (auto *user : result.getUsers()) {
if (user != opInst) {
localized = false;
break;
}
// Replace any uses of the original alloc op and erase it. All remaining uses
// have to be dealloc's; RAMUW above would've failed otherwise.
- assert(std::all_of(oldMemRef->user_begin(), oldMemRef->user_end(),
- [](Operation *op) { return isa<DeallocOp>(op); }));
- oldMemRef->replaceAllUsesWith(newAlloc);
+ assert(llvm::all_of(oldMemRef.getUsers(),
+ [](Operation *op) { return isa<DeallocOp>(op); }));
+ oldMemRef.replaceAllUsesWith(newAlloc);
allocOp.erase();
return success();
}
static LogicalResult vectorizeRootOrTerminal(Value iv,
LoadOrStoreOpPointer memoryOp,
VectorizationState *state) {
- auto memRefType = memoryOp.getMemRef()->getType().template cast<MemRefType>();
+ auto memRefType = memoryOp.getMemRef().getType().template cast<MemRefType>();
auto elementType = memRefType.getElementType();
// TODO(ntv): ponder whether we want to further vectorize a vector value.
/// TODO(ntv): handle more complex cases.
static Value vectorizeOperand(Value operand, Operation *op,
VectorizationState *state) {
- LLVM_DEBUG(dbgs() << "\n[early-vect]vectorize operand: ");
- LLVM_DEBUG(operand->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n[early-vect]vectorize operand: " << operand);
// 1. If this value has already been vectorized this round, we are done.
- if (state->vectorizedSet.count(operand->getDefiningOp()) > 0) {
+ if (state->vectorizedSet.count(operand.getDefiningOp()) > 0) {
LLVM_DEBUG(dbgs() << " -> already vector operand");
return operand;
}
auto it = state->replacementMap.find(operand);
if (it != state->replacementMap.end()) {
auto res = it->second;
- LLVM_DEBUG(dbgs() << "-> delayed replacement by: ");
- LLVM_DEBUG(res->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "-> delayed replacement by: " << res);
return res;
}
// 2. TODO(ntv): broadcast needed.
- if (operand->getType().isa<VectorType>()) {
+ if (operand.getType().isa<VectorType>()) {
LLVM_DEBUG(dbgs() << "-> non-vectorizable");
return nullptr;
}
// 3. vectorize constant.
- if (auto constant = dyn_cast<ConstantOp>(operand->getDefiningOp())) {
+ if (auto constant = dyn_cast<ConstantOp>(operand.getDefiningOp())) {
return vectorizeConstant(
op, constant,
- VectorType::get(state->strategy->vectorSizes, operand->getType()));
+ VectorType::get(state->strategy->vectorSizes, operand.getType()));
}
// 4. currently non-vectorizable.
- LLVM_DEBUG(dbgs() << "-> non-vectorizable");
- LLVM_DEBUG(operand->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "-> non-vectorizable: " << operand);
return nullptr;
}
SmallVector<Type, 8> vectorTypes;
for (auto v : opInst->getResults()) {
vectorTypes.push_back(
- VectorType::get(state->strategy->vectorSizes, v->getType()));
+ VectorType::get(state->strategy->vectorSizes, v.getType()));
}
SmallVector<Value, 8> vectorOperands;
for (auto v : opInst->getOperands()) {
auto clonedLoop = cast<AffineForOp>(builder.clone(*loopInst));
struct Guard {
LogicalResult failure() {
- loop.getInductionVar()->replaceAllUsesWith(clonedLoop.getInductionVar());
+ loop.getInductionVar().replaceAllUsesWith(clonedLoop.getInductionVar());
loop.erase();
return mlir::failure();
}
// Replace the values directly with the return operands.
assert(returnOp.getNumOperands() == valuesToRepl.size());
for (const auto &it : llvm::enumerate(returnOp.getOperands()))
- valuesToRepl[it.index()]->replaceAllUsesWith(it.value());
+ valuesToRepl[it.index()].replaceAllUsesWith(it.value());
}
/// Attempt to materialize a conversion for a type mismatch between a call
Location conversionLoc) const final {
// Only allow conversion for i16/i32 types.
if (!(resultType.isInteger(16) || resultType.isInteger(32)) ||
- !(input->getType().isInteger(16) || input->getType().isInteger(32)))
+ !(input.getType().isInteger(16) || input.getType().isInteger(32)))
return nullptr;
return builder.create<TestCastOp>(conversionLoc, resultType, input);
}
llvm::Optional<Location> location, ValueRange operands,
ArrayRef<NamedAttribute> attributes, RegionRange regions,
SmallVectorImpl<Type> &inferedReturnTypes) {
- if (operands[0]->getType() != operands[1]->getType()) {
+ if (operands[0].getType() != operands[1].getType()) {
return emitOptionalError(location, "operand type mismatch ",
- operands[0]->getType(), " vs ",
- operands[1]->getType());
+ operands[0].getType(), " vs ",
+ operands[1].getType());
}
- inferedReturnTypes.assign({operands[0]->getType()});
+ inferedReturnTypes.assign({operands[0].getType()});
return success();
}
let arguments = (ins I32:$input1, I32:$input2, I64Attr:$attr);
let results = (outs I32);
}
-def HasOneUse: Constraint<CPred<"$0->hasOneUse()">, "has one use">;
+def HasOneUse: Constraint<CPred<"$0.hasOneUse()">, "has one use">;
def : Pattern<
// Bind to source pattern op operand/attribute/result
(OpSymbolBindingA:$res_a $operand, $attr), [
void handleNoResultOp(PatternRewriter &rewriter, OpSymbolBindingNoResult op) {
// Turn the no result op to a one-result op.
- rewriter.create<OpSymbolBindingB>(op.getLoc(), op.operand()->getType(),
+ rewriter.create<OpSymbolBindingB>(op.getLoc(), op.operand().getType(),
op.operand());
}
TypeConverter::SignatureConversion result(entry->getNumArguments());
for (unsigned i = 0, e = entry->getNumArguments(); i != e; ++i)
if (failed(converter.convertSignatureArg(
- i, entry->getArgument(i)->getType(), result)))
+ i, entry->getArgument(i).getType(), result)))
return matchFailure();
// Convert the region signature and just drop the operation.
matchAndRewrite(Operation *op, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const final {
// Check for a return of F32.
- if (op->getNumOperands() != 1 || !op->getOperand(0)->getType().isF32())
+ if (op->getNumOperands() != 1 || !op->getOperand(0).getType().isF32())
return matchFailure();
// Check if the first operation is a cast operation, if it is we use the
// results directly.
- auto *defOp = operands[0]->getDefiningOp();
+ auto *defOp = operands[0].getDefiningOp();
if (auto packerOp = llvm::dyn_cast_or_null<TestCastOp>(defOp)) {
rewriter.replaceOpWithNewOp<TestReturnOp>(op, packerOp.getOperands());
return matchSuccess();
matchAndRewrite(Operation *op, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const final {
// Verify that the incoming operand has been successfully remapped to F64.
- if (!operands[0]->getType().isF64())
+ if (!operands[0].getType().isF64())
return matchFailure();
rewriter.replaceOpWithNewOp<TestTypeConsumerOp>(op, operands[0]);
return matchSuccess();
target.addDynamicallyLegalOp<TestTypeProducerOp>(
[](TestTypeProducerOp op) { return op.getType().isF64(); });
target.addDynamicallyLegalOp<TestTypeConsumerOp>([](TestTypeConsumerOp op) {
- return op.getOperand()->getType().isF64();
+ return op.getOperand().getType().isF64();
});
// Check support for marking certain operations as recursively legal.
// Try to inline each of the call operations.
for (auto caller : callers) {
auto callee = dyn_cast_or_null<FunctionalRegionOp>(
- caller.getCallee()->getDefiningOp());
+ caller.getCallee().getDefiningOp());
if (!callee)
continue;
interface, &callee.body(), caller,
llvm::to_vector<8>(caller.getArgOperands()),
SmallVector<Value, 8>(caller.getResults()), caller.getLoc(),
- /*shouldCloneInlinedRegion=*/!callee.getResult()->hasOneUse())))
+ /*shouldCloneInlinedRegion=*/!callee.getResult().hasOneUse())))
continue;
// If the inlining was successful then erase the call and callee if
void TestMemRefStrideCalculation::runOnFunction() {
llvm::outs() << "Testing: " << getFunction().getName() << "\n";
getFunction().walk([&](AllocOp allocOp) {
- auto memrefType = allocOp.getResult()->getType().cast<MemRefType>();
+ auto memrefType = allocOp.getResult().getType().cast<MemRefType>();
int64_t offset;
SmallVector<int64_t, 4> strides;
if (failed(getStridesAndOffset(memrefType, strides, offset))) {
// As a consequence we write only Ops with a single return type for the
// purpose of this test. If we need to test more intricate behavior in the
// future we can always extend.
- auto superVectorType = opInst->getResult(0)->getType().cast<VectorType>();
+ auto superVectorType = opInst->getResult(0).getType().cast<VectorType>();
auto ratio = shapeRatio(superVectorType, subVectorType);
if (!ratio.hasValue()) {
opInst->emitRemark("NOT MATCHED");
// CHECK: void OpB::build(Builder *tblgen_builder, OperationState &tblgen_state, Type y, Value x)
// CHECK: tblgen_state.addTypes(y);
// CHECK: void OpB::build(Builder *tblgen_builder, OperationState &tblgen_state, Value x)
-// CHECK: tblgen_state.addTypes({x->getType()});
+// CHECK: tblgen_state.addTypes({x.getType()});
def OpC : NS_Op<"three_normal_result_op", []> {
let results = (outs I32:$x, /*unnamed*/I32, I32:$z);
}
// CHECK-LABEL: OpK::build(Builder *tblgen_builder, OperationState &tblgen_state, ValueRange input)
-// CHECK: tblgen_state.addTypes({input.front()->getType()});
+// CHECK: tblgen_state.addTypes({input.front().getType()});
// CHECK-LABEL: OpA::verify
// CHECK: for (Value v : getODSOperands(0)) {
-// CHECK: if (!((v->getType().isInteger(32) || v->getType().isF32())))
+// CHECK: if (!((v.getType().isInteger(32) || v.getType().isF32())))
def OpB : NS_Op<"op_for_And_PredOpTrait", [
PredOpTrait<"both first and second holds",
// CHECK-LABEL: OpK::verify
// CHECK: for (Value v : getODSOperands(0)) {
-// CHECK: if (!(((v->getType().isa<TensorType>())) && (((v->getType().cast<ShapedType>().getElementType().isF32())) || ((v->getType().cast<ShapedType>().getElementType().isInteger(32))))))
+// CHECK: if (!(((v.getType().isa<TensorType>())) && (((v.getType().cast<ShapedType>().getElementType().isF32())) || ((v.getType().cast<ShapedType>().getElementType().isInteger(32))))))
} else if (isResultName(op, name)) {
bs << formatv("valueMapping[op.{0}()]", name);
} else if (name == "_resultType") {
- bs << "op.getResult()->getType().cast<LLVM::LLVMType>()."
+ bs << "op.getResult().getType().cast<LLVM::LLVMType>()."
"getUnderlyingType()";
} else if (name == "_hasResult") {
bs << "opInst.getNumResults() == 1";
}
// Result types
- SmallVector<std::string, 2> resultTypes(numResults, "operands[0]->getType()");
+ SmallVector<std::string, 2> resultTypes(numResults, "operands[0].getType()");
body << " " << builderOpState << ".addTypes({"
<< llvm::join(resultTypes, ", ") << "});\n\n";
}
// Push all result types to the operation state
const char *index = op.getOperand(0).isVariadic() ? ".front()" : "";
std::string resultType =
- formatv("{0}{1}->getType()", getArgumentName(op, 0), index).str();
+ formatv("{0}{1}.getType()", getArgumentName(op, 0), index).str();
m.body() << " " << builderOpState << ".addTypes({" << resultType;
for (int i = 1; i != numResults; ++i)
m.body() << ", " << resultType;
if (value.isVariadic())
break;
if (!value.name.empty())
- verifyCtx.addSubst(
- value.name, formatv("(*this->getOperation()->getOperand({0}))", i));
+ verifyCtx.addSubst(value.name,
+ formatv("this->getOperation()->getOperand({0})", i));
}
for (int i = 0, e = op.getNumResults(); i < e; ++i) {
auto &value = op.getResult(i);
break;
if (!value.name.empty())
verifyCtx.addSubst(value.name,
- formatv("(*this->getOperation()->getResult({0}))", i));
+ formatv("this->getOperation()->getResult({0})", i));
}
// Verify the attributes have the correct type.
body << " (void)v;\n"
<< " if (!("
<< tgfmt(constraint.getConditionTemplate(),
- &fctx.withSelf("v->getType()"))
+ &fctx.withSelf("v.getType()"))
<< ")) {\n"
<< formatv(" return emitOpError(\"{0} #\") << index "
- "<< \" must be {1}, but got \" << v->getType();\n",
+ "<< \" must be {1}, but got \" << v.getType();\n",
valueKind, constraint.getDescription())
<< " }\n" // if
<< " ++index;\n"
os.indent(indent + 2) << formatv(
"auto *op{0} = "
- "(*castedOp{1}.getODSOperands({2}).begin())->getDefiningOp();\n",
+ "(*castedOp{1}.getODSOperands({2}).begin()).getDefiningOp();\n",
depth + 1, depth, i);
emitOpMatch(argTree, depth + 1);
os.indent(indent + 2)
PrintFatalError(loc, error);
}
auto self =
- formatv("(*castedOp{0}.getODSOperands({1}).begin())->getType()",
- depth, argIndex);
+ formatv("(*castedOp{0}.getODSOperands({1}).begin()).getType()", depth,
+ argIndex);
os.indent(indent) << "if (!("
<< tgfmt(matcher.getConditionTemplate(),
&fmtCtx.withSelf(self))
auto cmd = "if (!({0})) return matchFailure();\n";
if (isa<TypeConstraint>(constraint)) {
- auto self = formatv("({0}->getType())",
+ auto self = formatv("({0}.getType())",
symbolInfoMap.getValueAndRangeUse(entities.front()));
os.indent(4) << formatv(cmd,
tgfmt(condition, &fmtCtx.withSelf(self.str())));
if (numResults != 0) {
for (int i = 0; i < numResults; ++i)
os.indent(6) << formatv("for (auto v : castedOp0.getODSResults({0})) {{"
- "tblgen_types.push_back(v->getType()); }\n",
+ "tblgen_types.push_back(v.getType()); }\n",
resultIndex + i);
}
os.indent(6) << formatv("{0} = rewriter.create<{1}>(loc, tblgen_types, "