/// Try to fold to_memref(to_tensor(x)). If x's type and the result type of the
/// to_memref op are different, a memref.cast is needed.
LogicalResult mlir::bufferization::foldToMemrefToTensorPair(
RewriterBase &rewriter, ToMemrefOp toMemref, bool allowSameType) {
- auto memrefToTensor = toMemref.tensor().getDefiningOp<ToTensorOp>();
+ auto memrefToTensor = toMemref.getTensor().getDefiningOp<ToTensorOp>();
if (!memrefToTensor)
return failure();
- Type srcType = memrefToTensor.memref().getType();
+ Type srcType = memrefToTensor.getMemref().getType();
Type destType = toMemref.getType();
// Directly rewrite if the type did not change.
if (srcType == destType) {
// Function can be configured to only handle cases where a cast is needed.
if (!allowSameType)
return failure();
- rewriter.replaceOp(toMemref, memrefToTensor.memref());
+ rewriter.replaceOp(toMemref, memrefToTensor.getMemref());
return success();
}
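// Illustrative IR for the same-type case above (sketch; SSA names are made up):
//   %t = bufferization.to_tensor %m : memref<4xf32>
//   %m2 = bufferization.to_memref %t : memref<4xf32>
// simply replaces all uses of %m2 with %m.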
// Ranked memref -> Ranked memref cast.
if (rankedSrcType && rankedDestType) {
FailureOr<Value> replacement = castOrReallocMemRefValue(
- rewriter, memrefToTensor.memref(), rankedDestType);
+ rewriter, memrefToTensor.getMemref(), rankedDestType);
if (failed(replacement))
return failure();
rewriter.replaceOp(toMemref, *replacement);
return success();
}
// All remaining cast-compatible cases: insert a memref.cast, no copy needed.
assert(memref::CastOp::areCastCompatible(srcType, destType) &&
"expected that types are cast compatible");
rewriter.replaceOpWithNewOp<memref::CastOp>(toMemref, destType,
- memrefToTensor.memref());
+ memrefToTensor.getMemref());
return success();
}
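// Illustrative IR for the ranked cast path above (sketch; names are made up):
//   %t = bufferization.to_tensor %m : memref<4xf32>
//   %m2 = bufferization.to_memref %t : memref<?xf32>
// is replaced by a cast of the original buffer:
//   %m2 = memref.cast %m : memref<4xf32> to memref<?xf32>
// castOrReallocMemRefValue may instead allocate and copy when the two layouts
// are not cast compatible.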
// Create buffer allocation.
Value copyBuffer;
- if (copy())
- copyBuffer = getBuffer(rewriter, copy(), options);
+ if (getCopy())
+ copyBuffer = getBuffer(rewriter, getCopy(), options);
auto allocType =
MemRefType::get(getType().getShape(), getType().getElementType());
- SmallVector<Value> dynamicDims = dynamicSizes();
- if (copy()) {
+ SmallVector<Value> dynamicDims = getDynamicSizes();
+ if (getCopy()) {
assert(dynamicDims.empty() && "expected either `copy` or `dynamicDims`");
populateDynamicDimSizes(rewriter, loc, copyBuffer, dynamicDims);
}
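// With a `copy` operand the dynamic extents are not passed explicitly; they
// are recovered from the copied buffer instead, e.g. (sketch; names made up):
//   %0 = bufferization.alloc_tensor() copy(%t) : tensor<?xf32>
// allocates a buffer whose dynamic size is taken from the buffer of %t.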
return failure();
// Create memory copy (if any).
- if (copy()) {
+ if (getCopy()) {
if (failed(options.createMemCpy(rewriter, loc, copyBuffer, *alloc)))
return failure();
}
// Should the buffer be deallocated?
AnalysisState analysisState(options);
bool dealloc;
- if (escape().hasValue()) {
- dealloc = !*escape();
+ if (getEscape().hasValue()) {
+ dealloc = !*getEscape();
} else {
// No "escape" annotation found.
if (options.createDeallocs) {
bool AllocTensorOp::isMemoryWrite(OpResult opResult,
const AnalysisState &state) {
// AllocTensorOps do not write unless they have a `copy` value.
- return static_cast<bool>(copy());
+ return static_cast<bool>(getCopy());
}
bool AllocTensorOp::bufferizesToMemoryRead(OpOperand &opOperand,
}
LogicalResult AllocTensorOp::verify() {
- if (copy() && !dynamicSizes().empty())
+ if (getCopy() && !getDynamicSizes().empty())
return emitError("dynamic sizes not needed when copying a tensor");
- if (!copy() && getType().getNumDynamicDims() !=
- static_cast<int64_t>(dynamicSizes().size()))
+ if (!getCopy() && getType().getNumDynamicDims() !=
+ static_cast<int64_t>(getDynamicSizes().size()))
return emitError("expected ")
<< getType().getNumDynamicDims() << " dynamic sizes";
- if (copy() && copy().getType() != getType())
+ if (getCopy() && getCopy().getType() != getType())
return emitError("expected that `copy` and return type match");
return success();
}
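// A few cases the verifier above rejects or accepts (illustrative sketch;
// SSA names are made up):
//   %0 = bufferization.alloc_tensor(%d) copy(%t) : tensor<?xf32>
//     -> error: dynamic sizes not needed when copying a tensor
//   %1 = bufferization.alloc_tensor() : tensor<?xf32>
//     -> error: expected 1 dynamic sizes
//   %2 = bufferization.alloc_tensor(%d) : tensor<?xf32>
//     -> ok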
LogicalResult matchAndRewrite(AllocTensorOp op,
PatternRewriter &rewriter) const override {
- if (op.copy())
+ if (op.getCopy())
return failure();
SmallVector<int64_t> newShape = llvm::to_vector(op.getType().getShape());
SmallVector<Value> newDynamicSizes;
unsigned dynValCounter = 0;
for (int64_t i = 0; i < op.getType().getRank(); ++i) {
if (!op.isDynamicDim(i))
continue;
- Value value = op.dynamicSizes()[dynValCounter++];
+ Value value = op.getDynamicSizes()[dynValCounter++];
APInt intVal;
if (matchPattern(value, m_ConstantInt(&intVal))) {
newShape[i] = intVal.getSExtValue();
return failure();
auto newOp = rewriter.create<AllocTensorOp>(
op.getLoc(), newType, newDynamicSizes, /*copy=*/Value(),
- /*escape=*/op.escapeAttr());
+ /*escape=*/op.getEscapeAttr());
rewriter.replaceOpWithNewOp<tensor::CastOp>(op, op.getType(), newOp);
return success();
}
}
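// Illustrative effect of this canonicalization (sketch; names are made up):
//   %c4 = arith.constant 4 : index
//   %0 = bufferization.alloc_tensor(%c4) : tensor<?xf32>
// is rewritten to
//   %1 = bufferization.alloc_tensor() : tensor<4xf32>
//   %0 = tensor.cast %1 : tensor<4xf32> to tensor<?xf32>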
void AllocTensorOp::print(OpAsmPrinter &p) {
- p << "(" << dynamicSizes() << ")";
- if (copy())
- p << " copy(" << copy() << ")";
+ p << "(" << getDynamicSizes() << ")";
+ if (getCopy())
+ p << " copy(" << getCopy() << ")";
p.printOptionalAttrDict((*this)->getAttrs(), /*elidedAttrs=*/{
AllocTensorOp::getOperandSegmentSizeAttr()});
p << " : ";
- auto type = result().getType();
+ auto type = getResult().getType();
if (auto validType = type.dyn_cast<::mlir::TensorType>())
p.printStrippedAttrOrType(validType);
else
Value AllocTensorOp::getDynamicSize(OpBuilder &b, unsigned idx) {
assert(isDynamicDim(idx) && "expected dynamic dim");
- if (copy())
- return b.create<tensor::DimOp>(getLoc(), copy(), idx);
+ if (getCopy())
+ return b.create<tensor::DimOp>(getLoc(), getCopy(), idx);
return getOperand(getIndexOfDynamicSize(idx));
}
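// getDynamicSize above: with a `copy` operand the size is materialized as a
// tensor.dim on the copied value; otherwise the matching `dynamicSizes`
// operand is returned as-is.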
void CloneOp::getEffects(
SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
&effects) {
- effects.emplace_back(MemoryEffects::Read::get(), input(),
+ effects.emplace_back(MemoryEffects::Read::get(), getInput(),
SideEffects::DefaultResource::get());
- effects.emplace_back(MemoryEffects::Write::get(), output(),
+ effects.emplace_back(MemoryEffects::Write::get(), getOutput(),
SideEffects::DefaultResource::get());
- effects.emplace_back(MemoryEffects::Allocate::get(), output(),
+ effects.emplace_back(MemoryEffects::Allocate::get(), getOutput(),
SideEffects::DefaultResource::get());
}
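// A clone reads its source buffer and both allocates and writes its
// destination buffer; registering these effects lets generic analyses and
// rewrites reason about the op without special-casing it.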
return success();
}
- Value source = cloneOp.input();
+ Value source = cloneOp.getInput();
// This only finds dealloc operations for the immediate value. It should
// also consider aliases. That would also make the safety check below
// redundant.
llvm::Optional<Operation *> maybeCloneDeallocOp =
- memref::findDealloc(cloneOp.output());
+ memref::findDealloc(cloneOp.getOutput());
// Skip if either of them has more than one deallocate operation.
if (!maybeCloneDeallocOp.hasValue())
return failure();
//===----------------------------------------------------------------------===//
OpFoldResult ToTensorOp::fold(ArrayRef<Attribute>) {
- if (auto toMemref = memref().getDefiningOp<ToMemrefOp>())
+ if (auto toMemref = getMemref().getDefiningOp<ToMemrefOp>())
// Approximate alias analysis by conservatively folding only when there is
// no interleaved operation.
if (toMemref->getBlock() == this->getOperation()->getBlock() &&
toMemref->getNextNode() == this->getOperation())
- return toMemref.tensor();
+ return toMemref.getTensor();
return {};
}
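// Illustrative fold (sketch; names are made up):
//   %m = bufferization.to_memref %t : memref<4xf32>
//   %t2 = bufferization.to_tensor %m : memref<4xf32>
// replaces uses of %t2 with %t, but only when the two ops are adjacent in the
// same block, as a conservative stand-in for alias analysis.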
if (!memrefToTensorOp)
return failure();
- rewriter.replaceOpWithNewOp<memref::DimOp>(dimOp, memrefToTensorOp.memref(),
- dimOp.index());
+ rewriter.replaceOpWithNewOp<memref::DimOp>(
+ dimOp, memrefToTensorOp.getMemref(), dimOp.index());
return success();
}
};
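// Illustrative rewrite for the pattern above (sketch; names are made up, and
// `dimOp` is assumed to be a tensor.dim on the to_tensor result):
//   %t = bufferization.to_tensor %m : memref<?xf32>
//   %d = tensor.dim %t, %c0 : tensor<?xf32>
// becomes
//   %d = memref.dim %m, %c0 : memref<?xf32>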
//===----------------------------------------------------------------------===//
OpFoldResult ToMemrefOp::fold(ArrayRef<Attribute>) {
- if (auto memrefToTensor = tensor().getDefiningOp<ToTensorOp>())
- if (memrefToTensor.memref().getType() == getType())
- return memrefToTensor.memref();
+ if (auto memrefToTensor = getTensor().getDefiningOp<ToTensorOp>())
+ if (memrefToTensor.getMemref().getType() == getType())
+ return memrefToTensor.getMemref();
return {};
}
if (!toMemref)
return failure();
- rewriter.replaceOpWithNewOp<tensor::ExtractOp>(load, toMemref.tensor(),
+ rewriter.replaceOpWithNewOp<tensor::ExtractOp>(load, toMemref.getTensor(),
load.indices());
return success();
}
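// Illustrative rewrite for the pattern above (sketch; names are made up, and
// `load` is assumed to be a memref.load on the to_memref result):
//   %m = bufferization.to_memref %t : memref<4xf32>
//   %v = memref.load %m[%i] : memref<4xf32>
// becomes
//   %v = tensor.extract %t[%i] : tensor<4xf32>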