From fb4cedcc1e0f8ec1071d23fd3910806398c6d6b0 Mon Sep 17 00:00:00 2001
From: Aliia Khasanova
Date: Thu, 20 Oct 2022 12:39:03 +0000
Subject: [PATCH] [mlir][nfc] Clean-up usage of kDynamicSize.

This patch prepares MLIR code base to change the value of kDynamicSize.
https://discourse.llvm.org/t/rfc-unify-kdynamicsize-and-kdynamicstrideoroffset/64534/4

Differential Revision: https://reviews.llvm.org/D136327
---
 flang/include/flang/Optimizer/Dialect/FIRTypes.td  |   2 +-
 flang/lib/Optimizer/Transforms/AffinePromotion.cpp |   3 +-
 mlir/lib/AsmParser/TypeParser.cpp                  |   2 +-
 mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp  |   5 +-
 mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp  |  11 ++-
 mlir/lib/Dialect/Affine/Utils/Utils.cpp            |   2 +-
 mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp      |   7 +-
 mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp   |   5 +-
 mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp           |   7 +-
 .../SparseTensor/IR/SparseTensorDialect.cpp        |   2 +-
 mlir/lib/Dialect/Tosa/IR/TosaOps.cpp               | 108 +++++++++++----------
 .../Tosa/Transforms/TosaDecomposeConv2D.cpp        |  20 ++--
 mlir/lib/Dialect/Traits.cpp                        |   7 +-
 mlir/lib/IR/BuiltinTypes.cpp                       |   6 +-
 mlir/python/mlir/dialects/_tensor_ops_ext.py       |   2 +-
 mlir/test/python/dialects/linalg/ops.py            |   6 +-
 mlir/test/python/dialects/shape.py                 |   2 +-
 mlir/test/python/dialects/tensor.py                |   5 +-
 mlir/test/python/dialects/vector.py                |   4 +-
 mlir/unittests/Dialect/BroadcastShapeTest.cpp      |   7 +-
 20 files changed, 123 insertions(+), 90 deletions(-)

diff --git a/flang/include/flang/Optimizer/Dialect/FIRTypes.td b/flang/include/flang/Optimizer/Dialect/FIRTypes.td
index f179071..0d06e1d 100644
--- a/flang/include/flang/Optimizer/Dialect/FIRTypes.td
+++ b/flang/include/flang/Optimizer/Dialect/FIRTypes.td
@@ -128,7 +128,7 @@ def fir_CharacterType : FIR_Type<"Character", "char"> {
     static constexpr LenType singleton() { return 1; }

    /// Character has a LEN value which is not a compile-time known constant.
-    static constexpr LenType unknownLen() { return -1; }
+    static constexpr LenType unknownLen() { return mlir::ShapedType::kDynamicSize; }

    /// Character LEN is a runtime value.
    bool hasDynamicLen() { return getLen() == unknownLen(); }
diff --git a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
index ae152cf..eaf0cd1 100644
--- a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
+++ b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
@@ -410,7 +410,8 @@ createAffineOps(mlir::Value arrayRef, mlir::PatternRewriter &rewriter) {
   auto affineApply = rewriter.create(acoOp.getLoc(), affineMap, indexArgs);
   auto arrayElementType = coordinateArrayElement(acoOp);
-  auto newType = mlir::MemRefType::get({-1}, arrayElementType);
+  auto newType =
+      mlir::MemRefType::get({mlir::ShapedType::kDynamicSize}, arrayElementType);
   auto arrayConvert = rewriter.create(acoOp.getLoc(), newType, acoOp.getMemref());
   return std::make_pair(affineApply, arrayConvert);
diff --git a/mlir/lib/AsmParser/TypeParser.cpp b/mlir/lib/AsmParser/TypeParser.cpp
index 5ab7a89..fa428b2 100644
--- a/mlir/lib/AsmParser/TypeParser.cpp
+++ b/mlir/lib/AsmParser/TypeParser.cpp
@@ -510,7 +510,7 @@ Parser::parseDimensionListRanked(SmallVectorImpl &dimensions,
   if (consumeIf(Token::question)) {
     if (!allowDynamic)
       return emitError(loc, "expected static shape");
-    dimensions.push_back(-1);
+    dimensions.push_back(ShapedType::kDynamicSize);
   } else {
     int64_t value;
     if (failed(parseIntegerInDimensionList(value)))
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
index e176e88..381c0a1 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -843,7 +843,7 @@ static bool findIntermediateShape(ArrayRef lhsShape,
                                   bool isDynamic) {
   if (isDynamic) {
     // TODO (natashaknk): Make dynamic intermediate shape not always be rank-1
-    intermediateShape = {-1};
+    intermediateShape = {ShapedType::kDynamicSize};
     return true;
   }

@@ -1778,7 +1778,8 @@ struct TileConverter : public OpConversionPattern {
     // Broadcast the newly added dimensions to their appropriate multiple.
     SmallVector genericShape;
     for (int i = 0; i < rank; i++) {
-      genericShape.push_back(multiples[i]);
+      int64_t dim = multiples[i];
+      genericShape.push_back(dim == -1 ? ShapedType::kDynamicSize : dim);
       genericShape.push_back(inputShape[i]);
     }
diff --git a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
index 9f2714d..41c1928 100644
--- a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
+++ b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
@@ -30,16 +30,16 @@ public:
                   PatternRewriter &rewriter) const final {
     Location loc = sliceOp.getLoc();
     Value input = sliceOp.getInput();
-    SmallVector strides;
+    SmallVector strides, sizes;
     auto starts = sliceOp.getStart();
-    auto sizes = sliceOp.getSize();
     strides.resize(sliceOp.getType().template cast().getRank(), 1);
     SmallVector dynSizes;

-    for (const auto &i : llvm::enumerate(sizes)) {
+    for (const auto &i : llvm::enumerate(sliceOp.getSize())) {
       int64_t size = i.value().cast().getInt();
       size_t index = i.index();
-      if (size != ShapedType::kDynamicSize)
+      sizes.push_back(size == -1 ? ShapedType::kDynamicSize : size);
+      if (!ShapedType::isDynamic(sizes.back()))
         continue;

       auto dim = rewriter.create(loc, input, index);
@@ -51,7 +51,8 @@ public:
     auto newSliceOp = rewriter.create(
         sliceOp.getLoc(), sliceOp.getType(), input, ValueRange({}), dynSizes,
-        ValueRange({}), starts, sizes, rewriter.getI64ArrayAttr(strides));
+        ValueRange({}), starts, rewriter.getI64ArrayAttr(sizes),
+        rewriter.getI64ArrayAttr(strides));

     rewriter.replaceOp(sliceOp, newSliceOp.getResult());
     return success();
diff --git a/mlir/lib/Dialect/Affine/Utils/Utils.cpp b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
index 53ab113..9f074a4 100644
--- a/mlir/lib/Dialect/Affine/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
@@ -1796,7 +1796,7 @@ MemRefType mlir::normalizeMemRefType(MemRefType memrefType,
     bool isDynDim = isNormalizedMemRefDynamicDim(d, layoutMap,
                                                  memrefTypeDynDims, context);
     if (isDynDim) {
-      newShape[d] = -1;
+      newShape[d] = ShapedType::kDynamicSize;
     } else {
       // The lower bound for the shape is always zero.
       Optional ubConst =
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
index 1702cd6..6a94f50 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
@@ -266,11 +266,12 @@ static Type parseVectorType(AsmParser &parser) {

   // We parsed a generic dimension list, but vectors only support two forms:
   //  - single non-dynamic entry in the list (fixed vector);
-  //  - two elements, the first dynamic (indicated by -1) and the second
+  //  - two elements, the first dynamic (indicated by ShapedType::kDynamicSize)
+  //    and the second
   //    non-dynamic (scalable vector).
   if (dims.empty() || dims.size() > 2 ||
-      ((dims.size() == 2) ^ (dims[0] == -1)) ||
-      (dims.size() == 2 && dims[1] == -1)) {
+      ((dims.size() == 2) ^ (ShapedType::isDynamic(dims[0]))) ||
+      (dims.size() == 2 && ShapedType::isDynamic(dims[1]))) {
     parser.emitError(dimPos)
         << "expected '? x x ' or ' x '";
     return Type();
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
index 0995b01..6f642ea 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -64,7 +64,8 @@ static Value allocBuffer(ImplicitLocOpBuilder &b,
   }

   // Fallback dynamic buffer.
-  auto dynamicBufferType = MemRefType::get(-1, b.getIntegerType(8));
+  auto dynamicBufferType =
+      MemRefType::get(ShapedType::kDynamicSize, b.getIntegerType(8));
   Value mul = b.createOrFold(
       b.create(width), allocSize);
   if (options.useAlloca)
@@ -242,7 +243,7 @@ FailureOr mlir::linalg::promoteSubviewAsNewBuffer(
     partialSizes.push_back(
         b.createOrFold(loc, subView, resultDimIdx++));
   }
-  SmallVector dynSizes(fullSizes.size(), -1);
+  SmallVector dynSizes(fullSizes.size(), ShapedType::kDynamicSize);
   // If a callback is not specified, then use the default implementation for
   // allocating the promoted buffer.
   Optional fullLocalView = allocationFn(b, subView, fullSizes, layout);
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
index 9a6727d..fb31737 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -185,7 +185,7 @@ struct SimplifyAllocConst : public OpRewritePattern {
     for (unsigned dim = 0, e = memrefType.getRank(); dim < e; ++dim) {
       int64_t dimSize = memrefType.getDimSize(dim);
       // If this is already static dimension, keep it.
-      if (dimSize != -1) {
+      if (!ShapedType::isDynamic(dimSize)) {
         newShapeConstants.push_back(dimSize);
         continue;
       }
@@ -197,7 +197,7 @@ struct SimplifyAllocConst : public OpRewritePattern {
         newShapeConstants.push_back(constantIndexOp.value());
       } else {
         // Dynamic shape dimension not folded; copy dynamicSize from old memref.
-        newShapeConstants.push_back(-1);
+        newShapeConstants.push_back(ShapedType::kDynamicSize);
         dynamicSizes.push_back(dynamicSize);
       }
       dynamicDimPos++;
@@ -666,7 +666,8 @@ bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {

   for (unsigned i = 0, e = aT.getRank(); i != e; ++i) {
     int64_t aDim = aT.getDimSize(i), bDim = bT.getDimSize(i);
-    if (aDim != -1 && bDim != -1 && aDim != bDim)
+    if (!ShapedType::isDynamic(aDim) && !ShapedType::isDynamic(bDim) &&
+        aDim != bDim)
       return false;
   }
   return true;
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index e9168f7..95a7a47 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -506,7 +506,7 @@ LogicalResult ConcatenateOp::verify() {
             "sum of all the concatenation dimensions of the input tensors.");
       }
     } else {
-      int prev = dstDim;
+      int64_t prev = dstDim;
      for (auto src : getInputs()) {
        auto d = src.getType().cast().getShape()[i];
        if (prev != ShapedType::kDynamicSize && d != prev)
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index 841a274..f946996 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -434,7 +434,7 @@ LogicalResult tosa::ConcatOp::inferReturnTypeComponents(
   }

   // Determine the dimension size along the concatenation axis.
-  int concatDimSize = 0;
+  int64_t concatDimSize = 0;
   for (auto operand : operands) {
     ShapeAdaptor operandShape = operands.getShape(operand);

@@ -645,7 +645,7 @@ LogicalResult tosa::TileOp::inferReturnTypeComponents(
   // Any non dynamic dimension can be multiplied to a known size.
   outputShape.reserve(multiples.size());
   for (int i = 0, s = inputShape.getRank(); i < s; i++) {
-    int dim = inputShape.getDimSize(i);
+    int64_t dim = inputShape.getDimSize(i);
     if (dim != ShapedType::kDynamicSize)
       dim *= multipleValues[i];
     outputShape.push_back(dim);
@@ -655,6 +655,12 @@ LogicalResult tosa::TileOp::inferReturnTypeComponents(
   return success();
 }

+static SmallVector ConvertToMlirShape(ArrayRef shape) {
+  return to_vector(llvm::map_range(shape, [](int64_t dim) {
+    return dim == -1 ? ShapedType::kDynamicSize : dim;
+  }));
+}
+
 LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
     MLIRContext *context, ::llvm::Optional location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
@@ -665,6 +671,7 @@ LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
   ArrayAttr newShape = adaptor.getNewShape();
   llvm::SmallVector newShapeValue;
   getI64Values(newShape, newShapeValue);
+  newShapeValue = ConvertToMlirShape(newShapeValue);

   // We cannot infer from the total number of elements so we must take the
   // shape attribute as exact.
@@ -679,14 +686,14 @@ LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
   int64_t numElements = inputShape.getNumElements();
   int64_t staticMul = 1;
   for (auto val : newShapeValue) {
-    if (val != ShapedType::kDynamicSize) {
+    if (!ShapedType::isDynamic(val)) {
       staticMul *= val;
     }
   }

   // Determine the length of the dynamic dimension.
   for (auto &val : newShapeValue) {
-    if (val == ShapedType::kDynamicSize)
+    if (ShapedType::isDynamic(val))
       val = numElements / staticMul;
   }

@@ -800,8 +807,8 @@ LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
   outputShape[0] = inputShape.getDimSize(0);
   outputShape[3] = inputShape.getDimSize(3);

-  int32_t inputHeight = inputShape.getDimSize(1);
-  int32_t inputWidth = inputShape.getDimSize(2);
+  int64_t inputHeight = inputShape.getDimSize(1);
+  int64_t inputWidth = inputShape.getDimSize(2);

   if ((inputHeight == ShapedType::kDynamicSize) ||
       (inputWidth == ShapedType::kDynamicSize))
@@ -961,7 +968,7 @@ static LogicalResult poolingInferReturnTypes(
     SmallVectorImpl &inferredReturnShapes) {
   ShapeAdaptor inputShape = operands.getShape(0);
   llvm::SmallVector outputShape;
-  outputShape.resize(4, -1);
+  outputShape.resize(4, ShapedType::kDynamicSize);

   // We only know the rank if the input type is unranked.
   if (!inputShape) {
@@ -973,8 +980,8 @@ static LogicalResult poolingInferReturnTypes(
   outputShape[0] = inputShape.getDimSize(0);
   outputShape[3] = inputShape.getDimSize(3);

-  int32_t height = inputShape.getDimSize(1);
-  int32_t width = inputShape.getDimSize(2);
+  int64_t height = inputShape.getDimSize(1);
+  int64_t width = inputShape.getDimSize(2);

   llvm::SmallVector kernel;
   llvm::SmallVector stride;
@@ -984,13 +991,13 @@ static LogicalResult poolingInferReturnTypes(
   getI64Values(attributes.get("stride").cast(), stride);
   getI64Values(attributes.get("pad").cast(), pad);

-  if (height != -1) {
-    int32_t padded = height + pad[0] + pad[1] - kernel[0];
+  if (!ShapedType::isDynamic(height)) {
+    int64_t padded = height + pad[0] + pad[1] - kernel[0];
     outputShape[1] = padded / stride[0] + 1;
   }

-  if (width != -1) {
-    int32_t padded = width + pad[2] + pad[3] - kernel[1];
+  if (!ShapedType::isDynamic(width)) {
+    int64_t padded = width + pad[2] + pad[3] - kernel[1];
     outputShape[2] = padded / stride[1] + 1;
   }

@@ -1005,10 +1012,10 @@ LogicalResult Conv2DOp::inferReturnTypeComponents(
   llvm::SmallVector outputShape(4, ShapedType::kDynamicSize);
   Conv2DOp::Adaptor adaptor(operands.getValues(), attributes);

-  int32_t inputWidth = ShapedType::kDynamicSize;
-  int32_t inputHeight = ShapedType::kDynamicSize;
-  int32_t weightWidth = ShapedType::kDynamicSize;
-  int32_t weightHeight = ShapedType::kDynamicSize;
+  int64_t inputWidth = ShapedType::kDynamicSize;
+  int64_t inputHeight = ShapedType::kDynamicSize;
+  int64_t weightWidth = ShapedType::kDynamicSize;
+  int64_t weightHeight = ShapedType::kDynamicSize;

   // Input shape describes input width/height and batch.
@@ -1045,17 +1052,17 @@ LogicalResult Conv2DOp::inferReturnTypeComponents(

   if (!ShapedType::isDynamic(inputHeight) &&
       !ShapedType::isDynamic(weightHeight)) {
-    int32_t inputSize = inputHeight + padding[0] + padding[1];
-    int32_t filterSize = (weightHeight - 1) * dilation[0] + 1;
-    int32_t unstridedResult = inputSize - filterSize + 1;
+    int64_t inputSize = inputHeight + padding[0] + padding[1];
+    int64_t filterSize = (weightHeight - 1) * dilation[0] + 1;
+    int64_t unstridedResult = inputSize - filterSize + 1;
     outputShape[1] = (unstridedResult - 1) / stride[0] + 1;
   }

   if (!ShapedType::isDynamic(inputWidth) &&
       !ShapedType::isDynamic(weightWidth)) {
-    int32_t inputSize = inputWidth + padding[2] + padding[3];
-    int32_t filterSize = (weightWidth - 1) * dilation[1] + 1;
-    int32_t unstridedResult = inputSize - filterSize + 1;
+    int64_t inputSize = inputWidth + padding[2] + padding[3];
+    int64_t filterSize = (weightWidth - 1) * dilation[1] + 1;
+    int64_t unstridedResult = inputSize - filterSize + 1;
     outputShape[2] = (unstridedResult - 1) / stride[1] + 1;
   }

@@ -1072,13 +1079,13 @@ LogicalResult Conv3DOp::inferReturnTypeComponents(
   llvm::SmallVector outputShape(5, ShapedType::kDynamicSize);
   Conv3DOp::Adaptor adaptor(operands.getValues(), attributes);

-  int32_t inputWidth = ShapedType::kDynamicSize;
-  int32_t inputHeight = ShapedType::kDynamicSize;
-  int32_t inputDepth = ShapedType::kDynamicSize;
+  int64_t inputWidth = ShapedType::kDynamicSize;
+  int64_t inputHeight = ShapedType::kDynamicSize;
+  int64_t inputDepth = ShapedType::kDynamicSize;

-  int32_t weightWidth = ShapedType::kDynamicSize;
-  int32_t weightHeight = ShapedType::kDynamicSize;
-  int32_t weightDepth = ShapedType::kDynamicSize;
+  int64_t weightWidth = ShapedType::kDynamicSize;
+  int64_t weightHeight = ShapedType::kDynamicSize;
+  int64_t weightDepth = ShapedType::kDynamicSize;

   // Input shape describes input width/height and batch.
   ShapeAdaptor inputShape = operands.getShape(adaptor.getInput());
@@ -1163,13 +1170,13 @@ LogicalResult DepthwiseConv2DOp::inferReturnTypeComponents(
   llvm::SmallVector outputShape(4, ShapedType::kDynamicSize);
   DepthwiseConv2DOp::Adaptor adaptor(operands.getValues(), attributes);

-  int32_t inputWidth = ShapedType::kDynamicSize;
-  int32_t inputHeight = ShapedType::kDynamicSize;
-  int32_t inputChannels = ShapedType::kDynamicSize;
+  int64_t inputWidth = ShapedType::kDynamicSize;
+  int64_t inputHeight = ShapedType::kDynamicSize;
+  int64_t inputChannels = ShapedType::kDynamicSize;

-  int32_t weightWidth = ShapedType::kDynamicSize;
-  int32_t weightHeight = ShapedType::kDynamicSize;
-  int32_t depthChannels = ShapedType::kDynamicSize;
+  int64_t weightWidth = ShapedType::kDynamicSize;
+  int64_t weightHeight = ShapedType::kDynamicSize;
+  int64_t depthChannels = ShapedType::kDynamicSize;

   // Input shape describes input width/height and batch.
   ShapeAdaptor inputShape = operands.getShape(adaptor.getInput());
@@ -1216,17 +1223,17 @@ LogicalResult DepthwiseConv2DOp::inferReturnTypeComponents(

   if (!ShapedType::isDynamic(inputHeight) &&
       !ShapedType::isDynamic(weightHeight)) {
-    int32_t inputSize = inputHeight + padding[0] + padding[1];
-    int32_t filterSize = (weightHeight - 1) * dilation[0] + 1;
-    int32_t unstridedResult = inputSize - filterSize + 1;
+    int64_t inputSize = inputHeight + padding[0] + padding[1];
+    int64_t filterSize = (weightHeight - 1) * dilation[0] + 1;
+    int64_t unstridedResult = inputSize - filterSize + 1;
     outputShape[1] = (unstridedResult - 1) / stride[0] + 1;
   }

   if (!ShapedType::isDynamic(inputWidth) &&
       !ShapedType::isDynamic(weightWidth)) {
-    int32_t inputSize = inputWidth + padding[2] + padding[3];
-    int32_t filterSize = (weightWidth - 1) * dilation[1] + 1;
-    int32_t unstridedResult = inputSize - filterSize + 1;
+    int64_t inputSize = inputWidth + padding[2] + padding[3];
+    int64_t filterSize = (weightWidth - 1) * dilation[1] + 1;
+    int64_t unstridedResult = inputSize - filterSize + 1;
     outputShape[2] = (unstridedResult - 1) / stride[1] + 1;
   }

@@ -1243,11 +1250,12 @@ LogicalResult TransposeConv2DOp::inferReturnTypeComponents(
   TransposeConv2DOp::Adaptor adaptor(operands.getValues(), attributes);
   llvm::SmallVector outputShape;
   getI64Values(adaptor.getOutShape(), outputShape);
+  outputShape = ConvertToMlirShape(outputShape);

-  int32_t inputWidth = ShapedType::kDynamicSize;
-  int32_t inputHeight = ShapedType::kDynamicSize;
-  int32_t weightWidth = ShapedType::kDynamicSize;
-  int32_t weightHeight = ShapedType::kDynamicSize;
+  int64_t inputWidth = ShapedType::kDynamicSize;
+  int64_t inputHeight = ShapedType::kDynamicSize;
+  int64_t weightWidth = ShapedType::kDynamicSize;
+  int64_t weightHeight = ShapedType::kDynamicSize;

   // Input shape describes input width/height and batch.
   ShapeAdaptor inputShape = operands.getShape(adaptor.getInput());
@@ -1285,16 +1293,18 @@ LogicalResult TransposeConv2DOp::inferReturnTypeComponents(

   if (!ShapedType::isDynamic(inputHeight) &&
       !ShapedType::isDynamic(weightHeight)) {
-    int32_t calculateSize =
+    int64_t calculateSize =
         (inputHeight - 1) * stride[0] - padding[0] - padding[1] + weightHeight;
-    outputShape[1] = outputShape[1] == -1 ? calculateSize : outputShape[1];
+    outputShape[1] =
+        ShapedType::isDynamic(outputShape[1]) ? calculateSize : outputShape[1];
   }

   if (!ShapedType::isDynamic(inputWidth) &&
       !ShapedType::isDynamic(weightWidth)) {
-    int32_t calculateSize =
+    int64_t calculateSize =
         (inputWidth - 1) * stride[1] - padding[2] - padding[3] + weightWidth;
-    outputShape[2] = outputShape[2] == -1 ? calculateSize : outputShape[2];
+    outputShape[2] =
+        ShapedType::isDynamic(outputShape[2]) ? calculateSize : outputShape[2];
   }

   inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
index 936e50a..790648e 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
@@ -19,6 +19,12 @@ using namespace mlir::tosa;

 namespace {

+SmallVector ConvertFromMlirShape(ArrayRef shape) {
+  return to_vector(llvm::map_range(shape, [](int64_t dim) {
+    return ShapedType::isDynamic(dim) ? -1 : dim;
+  }));
+}
+
 struct Conv2DIsFullyConnected : public OpRewritePattern {
   explicit Conv2DIsFullyConnected(MLIRContext *context)
       : OpRewritePattern(context) {}
@@ -52,16 +58,17 @@ struct Conv2DIsFullyConnected : public OpRewritePattern {

     // Reshape input to [N,IH,IW,IC] -> [N * IH * IW, IC].
     ArrayRef inputShape = inputType.getShape();
-    int64_t combined = inputShape[0] * inputShape[1] * inputShape[2];
-    if (combined < 0)
-      combined = ShapedType::kDynamicSize;
+    int64_t combined = ShapedType::kDynamicSize;
+    if (numDynamic == 0)
+      combined = inputShape[0] * inputShape[1] * inputShape[2];
     llvm::SmallVector revisedInputShape{combined, inputShape[3]};
     auto revisedInputShapeType =
         RankedTensorType::get(revisedInputShape, inputType.getElementType());
     auto reshapedInput = rewriter
                              .create(
                                  op.getLoc(), revisedInputShapeType, input,
-                                 rewriter.getI64ArrayAttr(revisedInputShape))
+                                 rewriter.getI64ArrayAttr(
+                                     ConvertFromMlirShape(revisedInputShape)))
                              .getResult();

     // Reshape kernel to [OC,KH,KW,IC] -> [OC, IC].
@@ -73,7 +80,8 @@ struct Conv2DIsFullyConnected : public OpRewritePattern {
     auto reshapedWeight = rewriter
                               .create(
                                   op.getLoc(), revisedWeightShapeType, weight,
-                                  rewriter.getI64ArrayAttr(revisedWeightShape))
+                                  rewriter.getI64ArrayAttr(
+                                      ConvertFromMlirShape(revisedWeightShape)))
                               .getResult();

     // Perform a fully connected network over the reshaped input and weight.
@@ -102,7 +110,7 @@ struct Conv2DIsFullyConnected : public OpRewritePattern {
                                            inputShape[2], weightShape[0]};
     rewriter.replaceOpWithNewOp(
         op, resultType, fullyConnectedValue,
-        rewriter.getI64ArrayAttr(outputShape));
+        rewriter.getI64ArrayAttr(ConvertFromMlirShape(outputShape)));
     return success();
   }
 };
diff --git a/mlir/lib/Dialect/Traits.cpp b/mlir/lib/Dialect/Traits.cpp
index ce2feff..1b3b373 100644
--- a/mlir/lib/Dialect/Traits.cpp
+++ b/mlir/lib/Dialect/Traits.cpp
@@ -80,7 +80,7 @@ bool OpTrait::util::getBroadcastedShape(ArrayRef shape1,

   // Check each dimension is consistent.
   for (; i1 != e1 && i2 != e2; ++i1, ++i2, ++iR) {
-    if (*i1 == -1 || *i2 == -1) {
+    if (ShapedType::isDynamic(*i1) || ShapedType::isDynamic(*i2)) {
       // One or both dimensions is unknown. Follow TensorFlow behavior:
       // - If either dimension is greater than 1, we assume that the program is
       //   correct, and the other dimension will be broadcast to match it.
@@ -94,7 +94,7 @@ bool OpTrait::util::getBroadcastedShape(ArrayRef shape1,
       } else if (*i2 == 1) {
         *iR = *i1;
       } else {
-        *iR = -1;
+        *iR = ShapedType::kDynamicSize;
       }
     } else {
       if (*i1 == *i2 || *i2 == 1) {
@@ -199,7 +199,8 @@ static bool isCompatibleInferredReturnShape(ArrayRef inferred,
     // then it is compatible, else if the inferred dim is 1 then it is also
    // compatible. But if the existing dim is 1 and the inferred is greater than
    // 1 then flag.
-    return dim1 == dim2 || dim1 == -1 || dim2 == -1 || dim1 == 1;
+    return dim1 == dim2 || ShapedType::isDynamic(dim1) ||
+           ShapedType::isDynamic(dim2) || dim1 == 1;
   };
   if (inferred.size() != existing.size())
     return false;
diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp
index 0136867..fe6d6ac 100644
--- a/mlir/lib/IR/BuiltinTypes.cpp
+++ b/mlir/lib/IR/BuiltinTypes.cpp
@@ -335,7 +335,7 @@ RankedTensorType::verify(function_ref emitError,
                          ArrayRef shape, Type elementType,
                          Attribute encoding) {
   for (int64_t s : shape)
-    if (s < -1)
+    if (s < 0 && !ShapedType::isDynamic(s))
       return emitError() << "invalid tensor dimension size";
   if (auto v = encoding.dyn_cast_or_null())
     if (failed(v.verifyEncoding(shape, elementType, emitError)))
@@ -656,9 +656,9 @@ LogicalResult MemRefType::verify(function_ref emitError,
   if (!BaseMemRefType::isValidElementType(elementType))
     return emitError() << "invalid memref element type";

-  // Negative sizes are not allowed except for `-1` that means dynamic size.
+  // Negative sizes are not allowed except for `kDynamicSize`.
   for (int64_t s : shape)
-    if (s < -1)
+    if (s < 0 && !ShapedType::isDynamic(s))
       return emitError() << "invalid memref size";
   assert(layout && "missing layout specification");
diff --git a/mlir/python/mlir/dialects/_tensor_ops_ext.py b/mlir/python/mlir/dialects/_tensor_ops_ext.py
index 0f1b266..51d998b 100644
--- a/mlir/python/mlir/dialects/_tensor_ops_ext.py
+++ b/mlir/python/mlir/dialects/_tensor_ops_ext.py
@@ -30,7 +30,7 @@ class EmptyOp:
       if isinstance(s, int):
         static_sizes.append(s)
       else:
-        static_sizes.append(-1)
+        static_sizes.append(ShapedType.get_dynamic_size())
         dynamic_sizes.append(s)
     result_type = RankedTensorType.get(static_sizes, element_type)
     op = self.build_generic(
diff --git a/mlir/test/python/dialects/linalg/ops.py b/mlir/test/python/dialects/linalg/ops.py
index e14ec42..367aa331 100644
--- a/mlir/test/python/dialects/linalg/ops.py
+++ b/mlir/test/python/dialects/linalg/ops.py
@@ -23,7 +23,8 @@ def testFill():
  # CHECK-NEXT: %[[CST:.*]] = arith.constant 0.0{{.*}} : f32
  # CHECK-NEXT: %[[RES:.*]] = linalg.fill ins(%[[CST]] : f32) outs(%[[OUT]] : tensor<12x?xf32>) -> tensor<12x?xf32>
  # CHECK-NEXT: return %[[RES]] : tensor<12x?xf32>
-  @func.FuncOp.from_py_func(RankedTensorType.get((12, -1), f32))
+  @func.FuncOp.from_py_func(
+      RankedTensorType.get((12, ShapedType.get_dynamic_size()), f32))
   def fill_tensor(out):
     zero = arith.ConstantOp(value=FloatAttr.get(f32, 0.), result=f32).result
     return linalg.fill(zero, outs=[out])
@@ -33,7 +34,8 @@ def testFill():
  # CHECK-NEXT: %[[CST:.*]] = arith.constant 0.0{{.*}} : f32
  # CHECK-NEXT: linalg.fill ins(%[[CST]] : f32) outs(%[[OUT]] : memref<12x?xf32>)
  # CHECK-NEXT: return
-  @func.FuncOp.from_py_func(MemRefType.get((12, -1), f32))
+  @func.FuncOp.from_py_func(
+      MemRefType.get((12, ShapedType.get_dynamic_size()), f32))
   def fill_buffer(out):
     zero = arith.ConstantOp(value=FloatAttr.get(f32, 0.), result=f32).result
     linalg.fill(zero, outs=[out])
diff --git a/mlir/test/python/dialects/shape.py b/mlir/test/python/dialects/shape.py
index 849b5ef..2ebad0d 100644
--- a/mlir/test/python/dialects/shape.py
+++ b/mlir/test/python/dialects/shape.py
@@ -20,7 +20,7 @@ def testConstShape():
     f32 = F32Type.get()
     with InsertionPoint(module.body):
       @func.FuncOp.from_py_func(
-          RankedTensorType.get((12, -1), f32))
+          RankedTensorType.get((12, ShapedType.get_dynamic_size()), f32))
       def const_shape_tensor(arg):
         return shape.ConstShapeOp(
             DenseElementsAttr.get(np.array([10, 20], dtype=np.int64), type=IndexType.get()))
diff --git a/mlir/test/python/dialects/tensor.py b/mlir/test/python/dialects/tensor.py
index cb05feb..f7f73a1 100644
--- a/mlir/test/python/dialects/tensor.py
+++ b/mlir/test/python/dialects/tensor.py
@@ -21,7 +21,10 @@ def testDimOp():
     indexType = IndexType.get()
     with InsertionPoint(module.body):

-      @func.FuncOp.from_py_func(RankedTensorType.get((-1, -1), f32Type))
+      @func.FuncOp.from_py_func(
+          RankedTensorType.get(
+              (ShapedType.get_dynamic_size(), ShapedType.get_dynamic_size()),
+              f32Type))
       # CHECK: func @tensor_static_dim
       # CHECK-SAME: %[[ARG0:.+]]: tensor
       # CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
diff --git a/mlir/test/python/dialects/vector.py b/mlir/test/python/dialects/vector.py
index 8f8d7f1..83c0961 100644
--- a/mlir/test/python/dialects/vector.py
+++ b/mlir/test/python/dialects/vector.py
@@ -35,7 +35,9 @@ def testTransferReadOp():
     module = Module.create()
     with InsertionPoint(module.body):
       vector_type = VectorType.get([2, 3], F32Type.get())
-      memref_type = MemRefType.get([-1, -1], F32Type.get())
+      memref_type = MemRefType.get(
+          [ShapedType.get_dynamic_size(),
+           ShapedType.get_dynamic_size()], F32Type.get())
       index_type = IndexType.get()
       mask_type = VectorType.get(vector_type.shape, IntegerType.get_signless(1))
       identity_map = AffineMap.get_identity(vector_type.rank)
diff --git a/mlir/unittests/Dialect/BroadcastShapeTest.cpp b/mlir/unittests/Dialect/BroadcastShapeTest.cpp
index de9b733..f1ab02d 100644
--- a/mlir/unittests/Dialect/BroadcastShapeTest.cpp
+++ b/mlir/unittests/Dialect/BroadcastShapeTest.cpp
@@ -47,9 +47,10 @@ TEST(BroadcastShapeTest, InterleavingOnes) {

 TEST(BroadcastShapeTest, InterleavingUnknowns) {
   SmallVector result;
-  ASSERT_TRUE(
-      getBroadcastedShape({1, 2, -1, -1, -1}, {-1, -1, -1, 4, 1}, result));
-  EXPECT_THAT(result, ElementsAre(-1, 2, -1, 4, -1));
+  int64_t dyn = mlir::ShapedType::kDynamicSize;
+  ASSERT_TRUE(getBroadcastedShape({1, 2, dyn, dyn, dyn}, {dyn, dyn, dyn, 4, 1},
+                                  result));
+  EXPECT_THAT(result, ElementsAre(dyn, 2, dyn, 4, dyn));
 }

 TEST(BroadcastShapeTest, IncompatibleLowDim) {
-- 
2.7.4
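
Illustrative sketch (not part of the patch; appended after the terminator for context): the clean-up above consistently replaces literal -1 dimension checks with ShapedType::isDynamic() and ShapedType::kDynamicSize. A minimal example of that idiom, assuming only the MLIR C++ API at this revision; the helper name is hypothetical:

// Returns the static element count of a shape, or ShapedType::kDynamicSize
// if any dimension is dynamic. Uses isDynamic()/kDynamicSize rather than
// comparing against the literal -1, matching the idiom this patch adopts.
#include "llvm/ADT/ArrayRef.h"
#include "mlir/IR/BuiltinTypes.h"

static int64_t staticNumElements(llvm::ArrayRef<int64_t> shape) {
  int64_t numElements = 1;
  for (int64_t dim : shape) {
    if (mlir::ShapedType::isDynamic(dim))
      return mlir::ShapedType::kDynamicSize;
    numElements *= dim;
  }
  return numElements;
}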