Adapt the StructuredOp verifier to ensure all operands are either in the input or the output group. The change is possible after adding support for scalar input operands (https://reviews.llvm.org/D104220).
Differential Revision: https://reviews.llvm.org/D104783
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return getNumInputs() + getNumOutputs();
+ return this->getOperation()->getNumOperands();
}]
>,
//===------------------------------------------------------------------===//
result.reserve(numOutputs);
llvm::transform(
this->getOperation()->getOpOperands()
- .drop_front(getNumInputs())
- .take_front(numOutputs),
+ .take_back(numOutputs),
std::back_inserter(result),
[](OpOperand &opOperand) { return &opOperand; });
return result;
OpOperandVector result;
result.reserve(numInputsAndOutputs);
llvm::transform(
- this->getOperation()->getOpOperands()
- .take_front(numInputsAndOutputs),
+ this->getOperation()->getOpOperands(),
std::back_inserter(result),
[](OpOperand &opOperand) { return &opOperand; });
return result;
/// `createFlatListOfOperandStaticDims`.
SmallVector<int64_t, 4> computeStaticLoopSizes();
- /// Returns all the operands past the inputs, output_buffers and
- /// init_tensors operands. Asserts that these operands are value types to
- /// allow transformations like tiling to just use the values when cloning
- /// `linalgOp`.
- Operation::operand_range getAssumedNonShapedOperands() {
- Operation::operand_range res{
- getOperation()->getOperands().begin() + getNumInputsAndOutputs(),
- getOperation()->getOperands().end()};
- for (Type t : TypeRange{res}) {
- (void)t;
- assert((t.isSignlessIntOrIndexOrFloat() || t.template isa<VectorType>())
- &&"expected scalar or vector type");
- }
- return res;
- }
-
/// Returns the value that expresses the shape of the output in terms of
/// shape of the input operands where possible
LogicalResult reifyReturnTypeShapesPerResultDim(OpBuilder &b,
LogicalResult mlir::linalg::detail::verifyStructuredOpInterface(Operation *op) {
LinalgOp linalgOp = cast<LinalgOp>(op);
- // Expect at least one input/output operand.
+ // Expect at least one output operand.
// This means an op that constructs a tensor out of indices cannot be a
// LinalgOp at the moment. For now this will have to be a special op until we
// have output shape operands that are not tensors.
- int64_t numInputsAndOutputs = linalgOp.getNumInputsAndOutputs();
- if (numInputsAndOutputs == 0)
- return op->emitOpError("expected at least one input/output operand");
- if (failed(OpTrait::impl::verifyAtLeastNOperands(op, numInputsAndOutputs)))
+ int64_t numInputs = linalgOp.getNumInputs();
+ int64_t numOutputs = linalgOp.getNumOutputs();
+ if (numOutputs == 0)
+ return op->emitOpError("expected at least one output operand");
+ if (failed(OpTrait::impl::verifyNOperands(op, numInputs + numOutputs)))
return failure();
// Should have at least one output tensor per result tensor.
// Can also have output buffers that do not correspond to results.
: opOperand->get());
newResultTypes.push_back(newOperands.back().getType());
}
- auto extraOperands = op.getAssumedNonShapedOperands();
- newOperands.append(extraOperands.begin(), extraOperands.end());
// Clone op.
Operation *newOp =
op.clone(rewriter, op->getLoc(), newResultTypes, newOperands);
newOperands.push_back(opOperand->get());
SmallVector<Value> outputOperands = op.getOutputOperands();
llvm::append_range(newOperands, outputOperands);
- llvm::append_range(newOperands, op.getAssumedNonShapedOperands());
// Repair the indexing maps by filtering out the ones that have been
// eliminated.
assert(!isa<linalg::GenericOp>(linalgOp.getOperation()));
SmallVector<Value, 8> newOperands = inputs;
newOperands.append(outputs.begin(), outputs.end());
- auto otherOperands = linalgOp.getAssumedNonShapedOperands();
- newOperands.append(otherOperands.begin(), otherOperands.end());
linalgOp.clone(rewriter, linalgOp.getLoc(),
/*resultTypes=*/ArrayRef<Type>{}, newOperands);
// Replace the results of the old op with the new output buffers.
// Clone the newly bufferized op.
SmallVector<Value> newOperands = newInputBuffers;
newOperands.append(newOutputBuffers.begin(), newOutputBuffers.end());
- auto otherOperands = op.getAssumedNonShapedOperands();
- newOperands.append(otherOperands.begin(), otherOperands.end());
op.clone(b, loc, /*resultTypes=*/TypeRange{}, newOperands);
// Replace the results of the old op with the new output buffers.
getTiledOperands(b, producer), ivs,
tileSizes, sizeBounds));
- // Append the other operands.
- auto operands = producer.getAssumedNonShapedOperands();
- clonedShapes.append(operands.begin(), operands.end());
-
// Iterate over the results in order.
// Extract the subtensor type from the linearized range.
// Since we do not enforce any canonicalizations on the fly, this is always
applyMapToValues(b, loc, shapeSizesToLoopsMap, allShapeSizes);
SmallVector<Value, 4> tiledOperands = makeTiledShapes(
b, loc, op, operands, interchangedIvs, tileSizes, sizeBounds);
- auto nonShapedOperands = op.getAssumedNonShapedOperands();
- tiledOperands.append(nonShapedOperands.begin(), nonShapedOperands.end());
// TODO: use an interface/adaptor to avoid leaking position in
// `tiledOperands`.
// Clone `opToPad` to operate on the statically padded shapes.
auto resultTensorTypes =
ValueRange(newOperands).take_back(opToPad.getNumOutputs()).getTypes();
- ValueRange otherOperands = opToPad.getAssumedNonShapedOperands();
- newOperands.append(otherOperands.begin(), otherOperands.end());
linalg::LinalgOp paddedOp =
opToPad.clone(rewriter, loc, resultTensorTypes, newOperands);