Merge kDynamicSize and kDynamicStrideOrOffset into one constant.
author     Aliia Khasanova <aliia@google.com>
           Fri, 18 Nov 2022 18:00:10 +0000
committer  Aliia Khasanova <aliia@google.com>
           Mon, 21 Nov 2022 13:01:26 +0000
Resolve conflicts.
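
For illustration, a minimal before/after sketch of how a caller migrates (hypothetical caller code, not part of this patch; `rank` and `offset` are placeholder values, and a standalone build would need mlir/IR/BuiltinTypes.h and llvm/ADT/SmallVector.h):

  // Before: distinct sentinels and predicates for sizes vs. strides/offsets.
  SmallVector<int64_t> shape(rank, ShapedType::kDynamicSize);
  SmallVector<int64_t> strides(rank, ShapedType::kDynamicStrideOrOffset);
  bool hasDynamicOffset = ShapedType::isDynamicStrideOrOffset(offset);

  // After: one sentinel (ShapedType::kDynamic) and one predicate
  // (ShapedType::isDynamic) cover sizes, strides, and offsets alike.
  SmallVector<int64_t> shape(rank, ShapedType::kDynamic);
  SmallVector<int64_t> strides(rank, ShapedType::kDynamic);
  bool hasDynamicOffset = ShapedType::isDynamic(offset);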

Differential Revision: https://reviews.llvm.org/D138282

71 files changed:
flang/include/flang/Optimizer/Dialect/FIRTypes.td
flang/lib/Optimizer/Transforms/AffinePromotion.cpp
mlir/include/mlir/Dialect/Arith/Utils/Utils.h
mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
mlir/include/mlir/Dialect/Shape/IR/Shape.h
mlir/include/mlir/Dialect/Shape/IR/ShapeBase.td
mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
mlir/include/mlir/Dialect/Tosa/Utils/ShapeUtils.h
mlir/include/mlir/IR/BuiltinAttributes.h
mlir/include/mlir/IR/BuiltinAttributes.td
mlir/include/mlir/IR/BuiltinTypeInterfaces.td
mlir/include/mlir/IR/BuiltinTypes.h
mlir/include/mlir/IR/BuiltinTypes.td
mlir/include/mlir/Interfaces/InferTypeOpInterface.h
mlir/include/mlir/Interfaces/ViewLikeInterface.td
mlir/lib/AsmParser/AttributeParser.cpp
mlir/lib/AsmParser/TypeParser.cpp
mlir/lib/CAPI/IR/BuiltinTypes.cpp
mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp
mlir/lib/Conversion/LLVMCommon/Pattern.cpp
mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp
mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp
mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
mlir/lib/Dialect/Affine/Analysis/Utils.cpp
mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp
mlir/lib/Dialect/Affine/Utils/Utils.cpp
mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
mlir/lib/Dialect/Bufferization/Transforms/BufferResultsToOutParams.cpp
mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp
mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp
mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
mlir/lib/Dialect/Linalg/Utils/Utils.cpp
mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
mlir/lib/Dialect/MemRef/Transforms/SimplifyExtractStridedMetadata.cpp
mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
mlir/lib/Dialect/Shape/IR/Shape.cpp
mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
mlir/lib/Dialect/Tensor/Utils/Utils.cpp
mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
mlir/lib/Dialect/Traits.cpp
mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp
mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp
mlir/lib/IR/BuiltinAttributes.cpp
mlir/lib/IR/BuiltinTypeInterfaces.cpp
mlir/lib/IR/BuiltinTypes.cpp
mlir/lib/Interfaces/ViewLikeInterface.cpp
mlir/test/lib/Analysis/TestMemRefStrideCalculation.cpp
mlir/test/lib/Dialect/Test/TestDialect.cpp
mlir/unittests/Dialect/BroadcastShapeTest.cpp

diff --git a/flang/include/flang/Optimizer/Dialect/FIRTypes.td b/flang/include/flang/Optimizer/Dialect/FIRTypes.td
index eaf43a6..df35129 100644
@@ -128,7 +128,7 @@ def fir_CharacterType : FIR_Type<"Character", "char"> {
     static constexpr LenType singleton() { return 1; }
 
     /// Character has a LEN value which is not a compile-time known constant.
-    static constexpr LenType unknownLen() { return mlir::ShapedType::kDynamicSize; }
+    static constexpr LenType unknownLen() { return mlir::ShapedType::kDynamic; }
 
     /// Character LEN is a runtime value.
     bool hasDynamicLen() { return getLen() == unknownLen(); }
@@ -482,9 +482,9 @@ def fir_SequenceType : FIR_Type<"Sequence", "array"> {
     // Does the sequence have unknown shape? (`array<* x T>`)
     bool hasUnknownShape() const { return getShape().empty(); }
 
-    // The value `kDynamicSize` represents an unknown extent for a dimension
+    // The value `kDynamic` represents an unknown extent for a dimension
     static constexpr Extent getUnknownExtent() {
-      return mlir::ShapedType::kDynamicSize;
+      return mlir::ShapedType::kDynamic;
     }
   }];
 }
diff --git a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
index eaf0cd1..558ba86 100644
@@ -411,7 +411,7 @@ createAffineOps(mlir::Value arrayRef, mlir::PatternRewriter &rewriter) {
                                                           affineMap, indexArgs);
   auto arrayElementType = coordinateArrayElement(acoOp);
   auto newType =
-      mlir::MemRefType::get({mlir::ShapedType::kDynamicSize}, arrayElementType);
+      mlir::MemRefType::get({mlir::ShapedType::kDynamic}, arrayElementType);
   auto arrayConvert = rewriter.create<fir::ConvertOp>(acoOp.getLoc(), newType,
                                                       acoOp.getMemref());
   return std::make_pair(affineApply, arrayConvert);
diff --git a/mlir/include/mlir/Dialect/Arith/Utils/Utils.h b/mlir/include/mlir/Dialect/Arith/Utils/Utils.h
index d7aa7db..8b8c000 100644
@@ -54,9 +54,9 @@ public:
     SmallVector<OpFoldResult> mixedOffsets(op.getMixedOffsets());
     SmallVector<OpFoldResult> mixedSizes(op.getMixedSizes());
     SmallVector<OpFoldResult> mixedStrides(op.getMixedStrides());
-    canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset);
+    canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamic);
     canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic);
-    canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset);
+    canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamic);
 
     // Create the new op in canonical form.
     ResultTypeFunc resultTypeFunc;
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
index 17f6bf4..c091767 100644
@@ -658,7 +658,7 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
       /*desc=*/[{
         Like `getShape`, but only returns statically-known information, without
         generating any new IR. For each shape dimension, returns >=0 if that
-        dimension is statically known, or ShapeType::kDynamicSize otherwise.
+        dimension is statically known, or ShapeType::kDynamic otherwise.
       }],
       /*retTy=*/"SmallVector<int64_t>",
       /*methodName=*/"getStaticShape",
@@ -675,7 +675,7 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
       /*desc=*/[{
         Returns the statically-known loop ranges. Composes
         `getShapesToLoopsMap()` with the result of `getStaticShape`.
-        Returns ShapeType::kDynamicSize for non-statically-known loop ranges.
+        Returns ShapeType::kDynamic for non-statically-known loop ranges.
         This is expected to be called by a valid Linalg op
       }],
       /*retTy=*/"SmallVector<int64_t, 4>",
diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
index d6fae79..9ee5d3d 100644
@@ -438,7 +438,7 @@ def SplitOp : Op<Transform_Dialect, "structured.split",
     static split point attribute when it is known at transform IR construction
     time or as the handle to an operation producing a single index-typed value
     when it is computed by payload IR. In the latter case, the static split
-    point must be set to `ShapedType::kDynamicSize` and the dynamic size handle
+    point must be set to `ShapedType::kDynamic` and the dynamic size handle
     must point to as many value-producing operations as there are structured
     operations pointed to by the target handle.
 
@@ -809,9 +809,9 @@ def TileOp : Op<Transform_Dialect, "structured.tile",
     case the tile value must be computed by the payload IR and the handle to the
     operation computing it must be provided through `dynamic_sizes`. When the
     sizes are not known statically, the corresponding entry in the
-    `static_sizes` attribute must be set to `ShapedType::kDynamicSize`. Only
+    `static_sizes` attribute must be set to `ShapedType::kDynamic`. Only
     the dynamic sizes must be provided in `dynamic_sizes`, i.e., there should
-    be as many handles as `ShapedType::kDynamicSize` values in the
+    be as many handles as `ShapedType::kDynamic` values in the
     `static_sizes` attribute. A static size of `0` indicates that the dimension
     should not be tiled. No loop will be generated for such dimensions. If all
     tile sizes are `0`, this transform is effectively a no-op.
@@ -950,10 +950,10 @@ def TileToForeachThreadOp :
     $target oilist(
         `num_threads` custom<DynamicIndexList>($num_threads,
                                                $static_num_threads,
-                                               "ShapedType::kDynamicSize") |
+                                               "ShapedType::kDynamic") |
          `tile_sizes` custom<DynamicIndexList>($tile_sizes,
                                                $static_tile_sizes,
-                                               "ShapedType::kDynamicSize"))
+                                               "ShapedType::kDynamic"))
     (`(` `mapping` `=` $mapping^ `)`)? attr-dict
   }];
   let hasVerifier = 1;
@@ -981,9 +981,9 @@ def TileToScfForOp : Op<Transform_Dialect, "structured.tile_to_scf_for",
     case the tile value must be computed by the payload IR and the handle to the
     operation computing it must be provided through `dynamic_sizes`. When the
     sizes are not known statically, the corresponding entry in the
-    `static_sizes` attribute must be set to `ShapedType::kDynamicSize`. Only
+    `static_sizes` attribute must be set to `ShapedType::kDynamic`. Only
     the dynamic sizes must be provided in `dynamic_sizes`, i.e., there should
-    be as many handles as `ShapedType::kDynamicSize` values in the
+    be as many handles as `ShapedType::kDynamic` values in the
     `static_sizes` attribute. A static size of `0` indicates that the dimension
     should not be tiled. No loop will be generated for such dimensions. If all
     tile sizes are `0`, this transform is effectively a no-op.
diff --git a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
index b364f68..319c089 100644
@@ -1268,13 +1268,13 @@ def MemRef_ReinterpretCastOp
   let assemblyFormat = [{
     $source `to` `offset` `` `:`
     custom<DynamicIndexList>($offsets, $static_offsets,
-                               "ShapedType::kDynamicStrideOrOffset")
+                               "ShapedType::kDynamic")
     `` `,` `sizes` `` `:`
     custom<DynamicIndexList>($sizes, $static_sizes,
-                               "ShapedType::kDynamicSize")
+                               "ShapedType::kDynamic")
     `` `,` `strides` `` `:`
     custom<DynamicIndexList>($strides, $static_strides,
-                               "ShapedType::kDynamicStrideOrOffset")
+                               "ShapedType::kDynamic")
     attr-dict `:` type($source) `to` type($result)
   }];
 
@@ -1739,8 +1739,8 @@ def SubViewOp : MemRef_OpWithOffsetSizesAndStrides<"subview", [
     The representation based on offsets, sizes and strides support a
     partially-static specification via attributes specified through the
     `static_offsets`, `static_sizes` and `static_strides` arguments. A special
-    sentinel value ShapedType::kDynamicSize and
-    ShapedType::kDynamicStrideOrOffset encodes that the corresponding entry has
+    sentinel value ShapedType::kDynamic and
+    ShapedType::kDynamic encodes that the corresponding entry has
     a dynamic value.
 
     A subview operation may additionally reduce the rank of the resulting view
@@ -1866,11 +1866,11 @@ def SubViewOp : MemRef_OpWithOffsetSizesAndStrides<"subview", [
   let assemblyFormat = [{
     $source ``
     custom<DynamicIndexList>($offsets, $static_offsets,
-                               "ShapedType::kDynamicStrideOrOffset")
+                               "ShapedType::kDynamic")
     custom<DynamicIndexList>($sizes, $static_sizes,
-                               "ShapedType::kDynamicSize")
+                               "ShapedType::kDynamic")
     custom<DynamicIndexList>($strides, $static_strides,
-                               "ShapedType::kDynamicStrideOrOffset")
+                               "ShapedType::kDynamic")
     attr-dict `:` type($source) `to` type($result)
   }];
 
diff --git a/mlir/include/mlir/Dialect/Shape/IR/Shape.h b/mlir/include/mlir/Dialect/Shape/IR/Shape.h
index c0598d3..8d2e310 100644
@@ -35,7 +35,7 @@ namespace shape {
 
 /// Alias type for extent tensors.
 RankedTensorType getExtentTensorType(MLIRContext *ctx,
-                                     int64_t rank = ShapedType::kDynamicSize);
+                                     int64_t rank = ShapedType::kDynamic);
 
 // Check if a type is an extent tensor, e.g., tensor<?xindex>.
 bool isExtentTensorType(Type);
diff --git a/mlir/include/mlir/Dialect/Shape/IR/ShapeBase.td b/mlir/include/mlir/Dialect/Shape/IR/ShapeBase.td
index 993092d..9c02579 100644
@@ -91,7 +91,7 @@ def Shape_ValueShapeType : Shape_Type<"ValueShape", "value_shape"> {
 
 def Shape_ExtentTensorType :
     1DTensorOf<[Index]>,
-    BuildableType<"::mlir::RankedTensorType::get({ShapedType::kDynamicSize}, "
+    BuildableType<"::mlir::RankedTensorType::get({ShapedType::kDynamic}, "
                   "$_builder.getType<::mlir::IndexType>())"> {
   let description = [{
     The extent tensor is a tensor of rank one with arbitrarily many index
diff --git a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
index 552d2db..352002b 100644
@@ -278,8 +278,8 @@ def Tensor_ExtractSliceOp : Tensor_OpWithOffsetSizesAndStrides<"extract_slice",
     The representation based on offsets, sizes and strides support a
     partially-static specification via attributes specified through the
     `static_offsets`, `static_sizes` and `static_strides` arguments. A special
-    sentinel value ShapedType::kDynamicSize and
-    ShapedType::kDynamicStrideOrOffset encodes that the corresponding entry has
+    sentinel value ShapedType::kDynamic and
+    ShapedType::kDynamic encodes that the corresponding entry has
     a dynamic value.
 
     After buffer allocation, the "extract_slice" op is expected to lower into a
@@ -335,11 +335,11 @@ def Tensor_ExtractSliceOp : Tensor_OpWithOffsetSizesAndStrides<"extract_slice",
   let assemblyFormat = [{
     $source ``
     custom<DynamicIndexList>($offsets, $static_offsets,
-                               "ShapedType::kDynamicStrideOrOffset")
+                               "ShapedType::kDynamic")
     custom<DynamicIndexList>($sizes, $static_sizes,
-                               "ShapedType::kDynamicSize")
+                               "ShapedType::kDynamic")
     custom<DynamicIndexList>($strides, $static_strides,
-                               "ShapedType::kDynamicStrideOrOffset")
+                               "ShapedType::kDynamic")
     attr-dict `:` type($source) `to` type($result)
   }];
 
@@ -772,8 +772,8 @@ def Tensor_InsertSliceOp : Tensor_OpWithOffsetSizesAndStrides<"insert_slice", [
     The representation based on offsets, sizes and strides support a
     partially-static specification via attributes specified through the
     `static_offsets`, `static_sizes` and `static_strides` arguments. A special
-    sentinel value ShapedType::kDynamicSize and
-    ShapedType::kDynamicStrideOrOffset encodes that the corresponding entry has
+    sentinel value ShapedType::kDynamic and
+    ShapedType::kDynamic encodes that the corresponding entry has
     a dynamic value.
 
     After buffer allocation, the "insert_slice" op is expected to lower into a
@@ -819,11 +819,11 @@ def Tensor_InsertSliceOp : Tensor_OpWithOffsetSizesAndStrides<"insert_slice", [
   let assemblyFormat = [{
     $source `into` $dest ``
     custom<DynamicIndexList>($offsets, $static_offsets,
-                               "ShapedType::kDynamicStrideOrOffset")
+                               "ShapedType::kDynamic")
     custom<DynamicIndexList>($sizes, $static_sizes,
-                               "ShapedType::kDynamicSize")
+                               "ShapedType::kDynamic")
     custom<DynamicIndexList>($strides, $static_strides,
-                               "ShapedType::kDynamicStrideOrOffset")
+                               "ShapedType::kDynamic")
     attr-dict `:` type($source) `into` type($dest)
   }];
 
@@ -1222,9 +1222,9 @@ def Tensor_PadOp : Tensor_Op<"pad", [
     $source
     (`nofold` $nofold^)?
     `low` `` custom<DynamicIndexList>($low, $static_low,
-                                        "ShapedType::kDynamicSize")
+                                        "ShapedType::kDynamic")
     `high` `` custom<DynamicIndexList>($high, $static_high,
-                                         "ShapedType::kDynamicSize")
+                                         "ShapedType::kDynamic")
     $region attr-dict `:` type($source) `to` type($result)
   }];
 
@@ -1377,8 +1377,8 @@ def Tensor_ParallelInsertSliceOp : Tensor_Op<"parallel_insert_slice", [
     The representation based on offsets, sizes and strides support a
     partially-static specification via attributes specified through the
     `static_offsets`, `static_sizes` and `static_strides` arguments. A special
-    sentinel value ShapedType::kDynamicSize and
-    ShapedType::kDynamicStrideOrOffset encodes that the corresponding entry has
+    sentinel value ShapedType::kDynamic and
+    ShapedType::kDynamic encodes that the corresponding entry has
     a dynamic value.
 
     After buffer allocation, the "parallel_insert_slice" op is expected to lower
@@ -1412,11 +1412,11 @@ def Tensor_ParallelInsertSliceOp : Tensor_Op<"parallel_insert_slice", [
   let assemblyFormat = [{
     $source `into` $dest ``
     custom<DynamicIndexList>($offsets, $static_offsets,
-                               "ShapedType::kDynamicStrideOrOffset")
+                               "ShapedType::kDynamic")
     custom<DynamicIndexList>($sizes, $static_sizes,
-                               "ShapedType::kDynamicSize")
+                               "ShapedType::kDynamic")
     custom<DynamicIndexList>($strides, $static_strides,
-                               "ShapedType::kDynamicStrideOrOffset")
+                               "ShapedType::kDynamic")
     attr-dict `:` type($source) `into` type($dest)
   }];
 
diff --git a/mlir/include/mlir/Dialect/Tosa/Utils/ShapeUtils.h b/mlir/include/mlir/Dialect/Tosa/Utils/ShapeUtils.h
index 1bfacd0..413988d 100644
@@ -111,14 +111,14 @@ struct ValueKnowledge {
       return result;
 
     result.hasRank = true;
-    result.sizes.resize(lhs.sizes.size(), ShapedType::kDynamicSize);
+    result.sizes.resize(lhs.sizes.size(), ShapedType::kDynamic);
     for (auto i : llvm::seq<unsigned>(0, result.sizes.size())) {
       int64_t lhsSize = lhs.sizes[i];
       int64_t rhsSize = rhs.sizes[i];
       int64_t &resultSize = result.sizes[i];
-      if (lhsSize == ShapedType::kDynamicSize) {
+      if (lhsSize == ShapedType::kDynamic) {
         resultSize = rhsSize;
-      } else if (rhsSize == ShapedType::kDynamicSize) {
+      } else if (rhsSize == ShapedType::kDynamic) {
         resultSize = lhsSize;
       } else if (lhsSize == rhsSize) {
         resultSize = lhsSize;
@@ -155,7 +155,7 @@ struct ValueKnowledge {
     }
 
     result.hasRank = true;
-    result.sizes.resize(lhs.sizes.size(), ShapedType::kDynamicSize);
+    result.sizes.resize(lhs.sizes.size(), ShapedType::kDynamic);
     for (int i = 0, e = lhs.sizes.size(); i < e; i++) {
       if (lhs.sizes[i] == rhs.sizes[i]) {
         result.sizes[i] = lhs.sizes[i];
@@ -170,7 +170,7 @@ struct ValueKnowledge {
   // Whether the value has known rank.
   bool hasRank;
   // If `hasRank`, the sizes along each rank. Unknown sizes are represented as
-  // `ShapedType::kDynamicSize`.
+  // `ShapedType::kDynamic`.
   llvm::SmallVector<int64_t> sizes;
   // The dtype of a tensor.
   // This is equal to nullptr if we don't know that it is a specific concrete
diff --git a/mlir/include/mlir/IR/BuiltinAttributes.h b/mlir/include/mlir/IR/BuiltinAttributes.h
index 17687ed..e6902fb 100644
@@ -1034,7 +1034,7 @@ inline bool operator!=(StringRef lhs, StringAttr rhs) { return !(lhs == rhs); }
 
 namespace mlir {
 
-/// Given a list of strides (in which MemRefType::getDynamicStrideOrOffset()
+/// Given a list of strides (in which ShapedType::kDynamic
 /// represents a dynamic value), return the single result AffineMap which
 /// represents the linearized strided layout map. Dimensions correspond to the
 /// offset followed by the strides in order. Symbols are inserted for each
diff --git a/mlir/include/mlir/IR/BuiltinAttributes.td b/mlir/include/mlir/IR/BuiltinAttributes.td
index 70f4732..70a535a 100644
@@ -1009,7 +1009,7 @@ def StridedLayoutAttr : Builtin_Attr<"StridedLayout",
     Strides must be positive and the offset must be non-negative. Both the
     strides and the offset may be _dynamic_, i.e. their value may not be known
     at compile time. This is expressed as a `?` in the assembly syntax and as
-    `ShapedType::kDynamicStrideOrOffset` in the code. Stride and offset values
+    `ShapedType::kDynamic` in the code. Stride and offset values
     must satisfy the constraints above at runtime, the behavior is undefined
     otherwise.
 
diff --git a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
index 6ecc067..08efe25 100644
@@ -54,7 +54,7 @@ def ShapedTypeInterface : TypeInterface<"ShapedType"> {
     A shape is a list of sizes corresponding to the dimensions of the container.
     If the number of dimensions in the shape is unknown, the shape is "unranked".
     If the number of dimensions is known, the shape "ranked". The sizes of the
-    dimensions of the shape must be positive, or kDynamicSize (in which case the
+    dimensions of the shape must be positive, or kDynamic (in which case the
     size of the dimension is dynamic, or not statically known).
   }];
   let methods = [
@@ -84,25 +84,18 @@ def ShapedTypeInterface : TypeInterface<"ShapedType"> {
   ];
 
   let extraClassDeclaration = [{
-    // TODO: merge these two special values in a single one used everywhere.
-    // Unfortunately, uses of `-1` have crept deep into the codebase now and are
-    // hard to track.
-    static constexpr int64_t kDynamicSize =
-        std::numeric_limits<int64_t>::min();
-    static constexpr int64_t kDynamicStrideOrOffset =
+    static constexpr int64_t kDynamic =
         std::numeric_limits<int64_t>::min();
 
     /// Whether the given dimension size indicates a dynamic dimension.
-    static constexpr bool isDynamic(int64_t dSize) {
-      return dSize == kDynamicSize;
+    static constexpr bool isDynamic(int64_t dValue) {
+       return dValue == kDynamic;
     }
+
     /// Whether the given shape has any size that indicates a dynamic dimension.
     static bool isDynamicShape(ArrayRef<int64_t> dSizes) {
       return any_of(dSizes, [](int64_t dSize) { return isDynamic(dSize); });
     }
-    static constexpr bool isDynamicStrideOrOffset(int64_t dStrideOrOffset) {
-      return dStrideOrOffset == kDynamicStrideOrOffset;
-    }
 
     /// Return the number of elements present in the given shape.
     static int64_t getNumElements(ArrayRef<int64_t> shape);
diff --git a/mlir/include/mlir/IR/BuiltinTypes.h b/mlir/include/mlir/IR/BuiltinTypes.h
index ceba71d..03fc709 100644
@@ -429,7 +429,7 @@ inline bool TensorType::classof(Type type) {
 ///      symbols.
 ///
 /// A stride specification is a list of integer values that are either static
-/// or dynamic (encoded with ShapedType::kDynamicStrideOrOffset). Strides encode
+/// or dynamic (encoded with ShapedType::kDynamic). Strides encode
 /// the distance in the number of elements between successive entries along a
 /// particular dimension.
 LogicalResult getStridesAndOffset(MemRefType t,
diff --git a/mlir/include/mlir/IR/BuiltinTypes.td b/mlir/include/mlir/IR/BuiltinTypes.td
index fbd9c63..c37463c 100644
@@ -584,12 +584,6 @@ def Builtin_MemRef : Builtin_Type<"MemRef", [
     /// New `Attribute getMemorySpace()` method should be used instead.
     unsigned getMemorySpaceAsInt() const;
 
-    // TODO: merge these two special values in a single one used everywhere.
-    // Unfortunately, uses of `-1` have crept deep into the codebase now and are
-    // hard to track.
-    static int64_t getDynamicStrideOrOffset() {
-      return ShapedType::kDynamicStrideOrOffset;
-    }
   }];
   let skipDefaultBuilders = 1;
   let genVerifyDecl = 1;
diff --git a/mlir/include/mlir/Interfaces/InferTypeOpInterface.h b/mlir/include/mlir/Interfaces/InferTypeOpInterface.h
index 89bc251..c89c728 100644
@@ -94,7 +94,7 @@ private:
 /// The components consist of
 ///  - A ranked or unranked shape with the dimension specification match those
 ///    of ShapeType's getShape() (e.g., dynamic dimension represented using
-///    ShapedType::kDynamicSize)
+///    ShapedType::kDynamic)
 ///  - A element type, may be unset (nullptr)
 ///  - A attribute, may be unset (nullptr)
 /// Used by ShapedType type inferences.
diff --git a/mlir/include/mlir/Interfaces/ViewLikeInterface.td b/mlir/include/mlir/Interfaces/ViewLikeInterface.td
index e3fac80..ea94f65 100644
@@ -50,8 +50,8 @@ def OffsetSizeAndStrideOpInterface : OpInterface<"OffsetSizeAndStrideOpInterface
          `getArrayAttrMaxRanks()`[0] (resp. [1], [2]).
       3. if an entry of `static_offsets` (resp. `static_sizes`,
          `static_strides`) is equal to a special sentinel value, namely
-         `ShapedType::kDynamicStrideOrOffset` (resp. `ShapedType::kDynamicSize`,
-         `ShapedType::kDynamicStrideOrOffset`), then the corresponding entry is
+         `ShapedType::kDynamic` (resp. `ShapedType::kDynamic`,
+         `ShapedType::kDynamic`), then the corresponding entry is
          a dynamic offset (resp. size, stride).
       4. a variadic `offset` (resp. `sizes`, `strides`) operand  must be present
          for each dynamic offset (resp. size, stride).
@@ -206,7 +206,7 @@ def OffsetSizeAndStrideOpInterface : OpInterface<"OffsetSizeAndStrideOpInterface
       /*defaultImplementation=*/[{
         ::llvm::APInt v = *(static_offsets()
           .template getAsValueRange<::mlir::IntegerAttr>().begin() + idx);
-        return ::mlir::ShapedType::isDynamicStrideOrOffset(v.getSExtValue());
+        return ::mlir::ShapedType::isDynamic(v.getSExtValue());
       }]
     >,
     InterfaceMethod<
@@ -234,7 +234,7 @@ def OffsetSizeAndStrideOpInterface : OpInterface<"OffsetSizeAndStrideOpInterface
       /*defaultImplementation=*/[{
         ::llvm::APInt v = *(static_strides()
           .template getAsValueRange<::mlir::IntegerAttr>().begin() + idx);
-        return ::mlir::ShapedType::isDynamicStrideOrOffset(v.getSExtValue());
+        return ::mlir::ShapedType::isDynamic(v.getSExtValue());
       }]
     >,
     InterfaceMethod<
@@ -296,7 +296,7 @@ def OffsetSizeAndStrideOpInterface : OpInterface<"OffsetSizeAndStrideOpInterface
         assert($_op.isDynamicOffset(idx) && "expected dynamic offset");
         auto numDynamic = getNumDynamicEntriesUpToIdx(
           static_offsets().template cast<::mlir::ArrayAttr>(),
-          ::mlir::ShapedType::isDynamicStrideOrOffset,
+          ::mlir::ShapedType::isDynamic,
           idx);
         return $_op.getOffsetSizeAndStrideStartOperandIndex() + numDynamic;
       }]
@@ -331,7 +331,7 @@ def OffsetSizeAndStrideOpInterface : OpInterface<"OffsetSizeAndStrideOpInterface
         assert($_op.isDynamicStride(idx) && "expected dynamic stride");
         auto numDynamic = getNumDynamicEntriesUpToIdx(
           static_strides().template cast<::mlir::ArrayAttr>(),
-          ::mlir::ShapedType::isDynamicStrideOrOffset,
+          ::mlir::ShapedType::isDynamic,
           idx);
         return $_op.getOffsetSizeAndStrideStartOperandIndex() +
           offsets().size() + sizes().size() + numDynamic;
diff --git a/mlir/lib/AsmParser/AttributeParser.cpp b/mlir/lib/AsmParser/AttributeParser.cpp
index 1e79b44..418dc7b 100644
@@ -1170,7 +1170,7 @@ Attribute Parser::parseStridedLayoutAttr() {
   // fit into int64_t limits.
   auto parseStrideOrOffset = [&]() -> Optional<int64_t> {
     if (consumeIf(Token::question))
-      return ShapedType::kDynamicStrideOrOffset;
+      return ShapedType::kDynamic;
 
     SMLoc loc = getToken().getLoc();
     auto emitWrongTokenError = [&] {
diff --git a/mlir/lib/AsmParser/TypeParser.cpp b/mlir/lib/AsmParser/TypeParser.cpp
index fc8c3fd..8ab9ef5 100644
@@ -514,7 +514,7 @@ Parser::parseDimensionListRanked(SmallVectorImpl<int64_t> &dimensions,
     if (consumeIf(Token::question)) {
       if (!allowDynamic)
         return emitError(loc, "expected static shape");
-      dimensions.push_back(ShapedType::kDynamicSize);
+      dimensions.push_back(ShapedType::kDynamic);
     } else {
       int64_t value;
       if (failed(parseIntegerInDimensionList(value)))
diff --git a/mlir/lib/CAPI/IR/BuiltinTypes.cpp b/mlir/lib/CAPI/IR/BuiltinTypes.cpp
index 596a760..6b6ba6e 100644
@@ -165,18 +165,18 @@ int64_t mlirShapedTypeGetDimSize(MlirType type, intptr_t dim) {
   return unwrap(type).cast<ShapedType>().getDimSize(static_cast<unsigned>(dim));
 }
 
-int64_t mlirShapedTypeGetDynamicSize() { return ShapedType::kDynamicSize; }
+int64_t mlirShapedTypeGetDynamicSize() { return ShapedType::kDynamic; }
 
 bool mlirShapedTypeIsDynamicSize(int64_t size) {
   return ShapedType::isDynamic(size);
 }
 
 bool mlirShapedTypeIsDynamicStrideOrOffset(int64_t val) {
-  return ShapedType::isDynamicStrideOrOffset(val);
+  return ShapedType::isDynamic(val);
 }
 
 int64_t mlirShapedTypeGetDynamicStrideOrOffset() {
-  return ShapedType::kDynamicStrideOrOffset;
+  return ShapedType::kDynamic;
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
index b88dcb2..85064a4 100644
@@ -203,7 +203,7 @@ struct RawBufferOpLowering : public ConvertOpToLLVMPattern<GpuOp> {
       size_t i = pair.index();
       Value index = pair.value();
       Value strideOp;
-      if (ShapedType::isDynamicStrideOrOffset(strides[i])) {
+      if (ShapedType::isDynamic(strides[i])) {
         strideOp = rewriter.create<LLVM::MulOp>(
             loc, memrefDescriptor.stride(rewriter, loc, i), byteWidthConst);
       } else {
@@ -226,7 +226,7 @@ struct RawBufferOpLowering : public ConvertOpToLLVMPattern<GpuOp> {
     Value sgprOffset = adaptor.getSgprOffset();
     if (!sgprOffset)
       sgprOffset = createI32Constant(rewriter, loc, 0);
-    if (ShapedType::isDynamicStrideOrOffset(offset))
+    if (ShapedType::isDynamic(offset))
       sgprOffset = rewriter.create<LLVM::AddOp>(
           loc, memrefDescriptor.offset(rewriter, loc), sgprOffset);
     else if (offset > 0)
diff --git a/mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp b/mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp
index 4f72cd1..9414b6f 100644
@@ -57,9 +57,9 @@ MemRefDescriptor MemRefDescriptor::fromStaticShape(
   auto result = getStridesAndOffset(type, strides, offset);
   (void)result;
   assert(succeeded(result) && "unexpected failure in stride computation");
-  assert(!ShapedType::isDynamicStrideOrOffset(offset) &&
+  assert(!ShapedType::isDynamic(offset) &&
          "expected static offset");
-  assert(!llvm::any_of(strides, ShapedType::isDynamicStrideOrOffset) &&
+  assert(!llvm::any_of(strides, ShapedType::isDynamic) &&
          "expected static strides");
 
   auto convertedType = typeConverter.convertType(type);
diff --git a/mlir/lib/Conversion/LLVMCommon/Pattern.cpp b/mlir/lib/Conversion/LLVMCommon/Pattern.cpp
index 8413dcf..14799f8 100644
@@ -80,14 +80,14 @@ Value ConvertToLLVMPattern::getStridedElementPtr(
 
   Value index;
   if (offset != 0) // Skip if offset is zero.
-    index = ShapedType::isDynamicStrideOrOffset(offset)
+    index = ShapedType::isDynamic(offset)
                 ? memRefDescriptor.offset(rewriter, loc)
                 : createIndexConstant(rewriter, loc, offset);
 
   for (int i = 0, e = indices.size(); i < e; ++i) {
     Value increment = indices[i];
     if (strides[i] != 1) { // Skip if stride is 1.
-      Value stride = ShapedType::isDynamicStrideOrOffset(strides[i])
+      Value stride = ShapedType::isDynamic(strides[i])
                          ? memRefDescriptor.stride(rewriter, loc, i)
                          : createIndexConstant(rewriter, loc, strides[i]);
       increment = rewriter.create<LLVM::MulOp>(loc, increment, stride);
@@ -123,14 +123,14 @@ void ConvertToLLVMPattern::getMemRefDescriptorSizes(
     SmallVectorImpl<Value> &strides, Value &sizeBytes) const {
   assert(isConvertibleAndHasIdentityMaps(memRefType) &&
          "layout maps must have been normalized away");
-  assert(count(memRefType.getShape(), ShapedType::kDynamicSize) ==
+  assert(count(memRefType.getShape(), ShapedType::kDynamic) ==
              static_cast<ssize_t>(dynamicSizes.size()) &&
          "dynamicSizes size doesn't match dynamic sizes count in memref shape");
 
   sizes.reserve(memRefType.getRank());
   unsigned dynamicIndex = 0;
   for (int64_t size : memRefType.getShape()) {
-    sizes.push_back(size == ShapedType::kDynamicSize
+    sizes.push_back(size == ShapedType::kDynamic
                         ? dynamicSizes[dynamicIndex++]
                         : createIndexConstant(rewriter, loc, size));
   }
@@ -146,14 +146,14 @@ void ConvertToLLVMPattern::getMemRefDescriptorSizes(
     if (size == 0)
       continue;
     bool useSizeAsStride = stride == 1;
-    if (size == ShapedType::kDynamicSize)
-      stride = ShapedType::kDynamicSize;
-    if (stride != ShapedType::kDynamicSize)
+    if (size == ShapedType::kDynamic)
+      stride = ShapedType::kDynamic;
+    if (stride != ShapedType::kDynamic)
       stride *= size;
 
     if (useSizeAsStride)
       runningStride = sizes[i];
-    else if (stride == ShapedType::kDynamicSize)
+    else if (stride == ShapedType::kDynamic)
       runningStride =
           rewriter.create<LLVM::MulOp>(loc, runningStride, sizes[i]);
     else
diff --git a/mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp b/mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp
index aec2d3f..ee939fa 100644
@@ -386,10 +386,10 @@ bool LLVMTypeConverter::canConvertToBarePtr(BaseMemRefType type) {
     return false;
 
   for (int64_t stride : strides)
-    if (ShapedType::isDynamicStrideOrOffset(stride))
+    if (ShapedType::isDynamic(stride))
       return false;
 
-  return !ShapedType::isDynamicStrideOrOffset(offset);
+  return !ShapedType::isDynamic(offset);
 }
 
 /// Convert a memref type to a bare pointer to the memref element type.
diff --git a/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp b/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp
index ea586ff..600c381 100644
@@ -27,9 +27,8 @@ using namespace mlir::linalg;
 
 static MemRefType makeStridedLayoutDynamic(MemRefType type) {
   return MemRefType::Builder(type).setLayout(StridedLayoutAttr::get(
-      type.getContext(), ShapedType::kDynamicStrideOrOffset,
-      SmallVector<int64_t>(type.getRank(),
-                           ShapedType::kDynamicStrideOrOffset)));
+      type.getContext(), ShapedType::kDynamic,
+      SmallVector<int64_t>(type.getRank(), ShapedType::kDynamic)));
 }
 
 /// Helper function to extract the operand types that are passed to the
diff --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
index 4685590..42d2d9a 100644
@@ -33,7 +33,7 @@ using namespace mlir;
 namespace {
 
 bool isStaticStrideOrOffset(int64_t strideOrOffset) {
-  return !ShapedType::isDynamicStrideOrOffset(strideOrOffset);
+  return !ShapedType::isDynamic(strideOrOffset);
 }
 
 LLVM::LLVMFuncOp getFreeFn(LLVMTypeConverter *typeConverter, ModuleOp module) {
@@ -145,7 +145,7 @@ struct ReallocOpLoweringBase : public AllocationOpLLVMLowering {
         [&](MemRefType type, function_ref<Value()> getDynamicSize) -> Value {
       // Compute number of elements.
       int64_t size = type.getShape()[0];
-      Value numElements = ((size == ShapedType::kDynamicSize)
+      Value numElements = ((size == ShapedType::kDynamic)
                                ? getDynamicSize()
                                : createIndexConstant(rewriter, loc, size));
       Type indexType = getIndexType();
@@ -1219,7 +1219,7 @@ private:
       Value stride = nullptr;
       int64_t targetRank = targetMemRefType.getRank();
       for (auto i : llvm::reverse(llvm::seq<int64_t>(0, targetRank))) {
-        if (!ShapedType::isDynamicStrideOrOffset(strides[i])) {
+        if (!ShapedType::isDynamic(strides[i])) {
           // If the stride for this dimension is dynamic, then use the product
           // of the sizes of the inner dimensions.
           stride = createIndexConstant(rewriter, loc, strides[i]);
@@ -1761,7 +1761,7 @@ struct SubViewOpLowering : public ConvertOpToLLVMPattern<memref::SubViewOp> {
 
     // Offset.
     auto llvmIndexType = typeConverter->convertType(rewriter.getIndexType());
-    if (!ShapedType::isDynamicStrideOrOffset(offset)) {
+    if (!ShapedType::isDynamic(offset)) {
       targetMemRef.setConstantOffset(rewriter, loc, offset);
     } else {
       Value baseOffset = sourceMemRef.offset(rewriter, loc);
@@ -1806,7 +1806,7 @@ struct SubViewOpLowering : public ConvertOpToLLVMPattern<memref::SubViewOp> {
         // constants.
         int64_t staticSize =
             subViewOp.getSource().getType().cast<MemRefType>().getShape()[i];
-        if (staticSize != ShapedType::kDynamicSize) {
+        if (staticSize != ShapedType::kDynamic) {
           size = rewriter.create<LLVM::ConstantOp>(
               loc, llvmIndexType, rewriter.getI64IntegerAttr(staticSize));
         } else {
@@ -1828,7 +1828,7 @@ struct SubViewOpLowering : public ConvertOpToLLVMPattern<memref::SubViewOp> {
                 : rewriter.create<LLVM::ConstantOp>(
                       loc, llvmIndexType,
                       rewriter.getI64IntegerAttr(subViewOp.getStaticSize(i)));
-        if (!ShapedType::isDynamicStrideOrOffset(strides[i])) {
+        if (!ShapedType::isDynamic(strides[i])) {
           stride = rewriter.create<LLVM::ConstantOp>(
               loc, llvmIndexType, rewriter.getI64IntegerAttr(strides[i]));
         } else {
@@ -1932,7 +1932,7 @@ struct ViewOpLowering : public ConvertOpToLLVMPattern<memref::ViewOp> {
                   ArrayRef<int64_t> strides, Value nextSize,
                   Value runningStride, unsigned idx) const {
     assert(idx < strides.size());
-    if (!ShapedType::isDynamicStrideOrOffset(strides[idx]))
+    if (!ShapedType::isDynamic(strides[idx]))
       return createIndexConstant(rewriter, loc, strides[idx]);
     if (nextSize)
       return runningStride
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
index 7e4c38b..fc07216 100644
@@ -844,7 +844,7 @@ static bool findIntermediateShape(ArrayRef<int64_t> lhsShape,
                                   bool isDynamic) {
   if (isDynamic) {
     // TODO (natashaknk): Make dynamic intermediate shape not always be rank-1
-    intermediateShape = {ShapedType::kDynamicSize};
+    intermediateShape = {ShapedType::kDynamic};
     return true;
   }
 
@@ -1886,7 +1886,7 @@ struct TileConverter : public OpConversionPattern<tosa::TileOp> {
     SmallVector<int64_t, 2> genericShape;
     for (int i = 0; i < rank; i++) {
       int64_t dim = multiples[i];
-      genericShape.push_back(dim == -1 ? ShapedType::kDynamicSize : dim);
+      genericShape.push_back(dim == -1 ? ShapedType::kDynamic : dim);
       genericShape.push_back(inputShape[i]);
     }
 
diff --git a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
index 41c1928..92bb30e 100644
@@ -38,7 +38,7 @@ public:
     for (const auto &i : llvm::enumerate(sliceOp.getSize())) {
       int64_t size = i.value().cast<IntegerAttr>().getInt();
       size_t index = i.index();
-      sizes.push_back(size == -1 ? ShapedType::kDynamicSize : size);
+      sizes.push_back(size == -1 ? ShapedType::kDynamic : size);
       if (!ShapedType::isDynamic(sizes.back()))
         continue;
 
diff --git a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
index b64b0d8..b5105ca 100644
@@ -108,7 +108,7 @@ getMemrefConstantHorizontalStride(ShapedType type) {
       strides.back() != 1)
     return llvm::None;
   int64_t stride = strides[strides.size() - 2];
-  if (stride == ShapedType::kDynamicStrideOrOffset)
+  if (stride == ShapedType::kDynamic)
     return llvm::None;
   return stride;
 }
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index fd3cee5..ca5cdb7 100644
@@ -17,7 +17,6 @@
 #include "mlir/Dialect/Vector/Transforms/VectorTransforms.h"
 #include "mlir/IR/BuiltinTypes.h"
 #include "mlir/IR/TypeUtilities.h"
-#include "mlir/Support/MathExtras.h"
 #include "mlir/Target/LLVMIR/TypeToLLVM.h"
 #include "mlir/Transforms/DialectConversion.h"
 
@@ -963,8 +962,8 @@ computeContiguousStrides(MemRefType memRefType) {
   auto sizes = memRefType.getShape();
   for (int index = 0, e = strides.size() - 1; index < e; ++index) {
     if (ShapedType::isDynamic(sizes[index + 1]) ||
-        ShapedType::isDynamicStrideOrOffset(strides[index]) ||
-        ShapedType::isDynamicStrideOrOffset(strides[index + 1]))
+        ShapedType::isDynamic(strides[index]) ||
+        ShapedType::isDynamic(strides[index + 1]))
       return None;
     if (strides[index] != strides[index + 1] * sizes[index + 1])
       return None;
@@ -1009,7 +1008,7 @@ public:
     if (!targetStrides)
       return failure();
     // Only support static strides for now, regardless of contiguity.
-    if (llvm::any_of(*targetStrides, ShapedType::isDynamicStrideOrOffset))
+    if (llvm::any_of(*targetStrides, ShapedType::isDynamic))
       return failure();
 
     auto int64Ty = IntegerType::get(rewriter.getContext(), 64);
diff --git a/mlir/lib/Dialect/Affine/Analysis/Utils.cpp b/mlir/lib/Dialect/Affine/Analysis/Utils.cpp
index 008b9c4..84b9f5f 100644
@@ -380,7 +380,7 @@ Optional<int64_t> MemRefRegion::getConstantBoundingSizeAndShape(
       // If no constant bound is found, then it can always be bound by the
       // memref's dim size if the latter has a constant size along this dim.
       auto dimSize = memRefType.getDimSize(d);
-      if (dimSize == ShapedType::kDynamicSize)
+      if (dimSize == ShapedType::kDynamic)
         return None;
       diffConstant = dimSize;
       // Lower bound becomes 0.
diff --git a/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp
index 1329d9d..cd07b27 100644
@@ -92,7 +92,7 @@ static bool doubleBuffer(Value oldMemRef, AffineForOp forOp) {
   // Put together alloc operands for any dynamic dimensions of the memref.
   SmallVector<Value, 4> allocOperands;
   for (const auto &dim : llvm::enumerate(oldMemRefType.getShape())) {
-    if (dim.value() == ShapedType::kDynamicSize)
+    if (dim.value() == ShapedType::kDynamic)
       allocOperands.push_back(bOuter.createOrFold<memref::DimOp>(
           forOp.getLoc(), oldMemRef, dim.index()));
   }
diff --git a/mlir/lib/Dialect/Affine/Utils/Utils.cpp b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
index 2d4ecc8..0973e18 100644
@@ -1794,7 +1794,7 @@ MemRefType mlir::normalizeMemRefType(MemRefType memrefType,
     bool isDynDim =
         isNormalizedMemRefDynamicDim(d, layoutMap, memrefTypeDynDims, context);
     if (isDynDim) {
-      newShape[d] = ShapedType::kDynamicSize;
+      newShape[d] = ShapedType::kDynamic;
     } else {
       // The lower bound for the shape is always zero.
       Optional<int64_t> ubConst =
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
index 3c13506..0c2e13e 100644
@@ -767,9 +767,9 @@ bufferization::getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
   auto memorySpaceAttr = IntegerAttr::get(
       IntegerType::get(tensorType.getContext(), 64), memorySpace);
   auto rankedTensorType = tensorType.cast<RankedTensorType>();
-  int64_t dynamicOffset = ShapedType::kDynamicStrideOrOffset;
+  int64_t dynamicOffset = ShapedType::kDynamic;
   SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
-                                      ShapedType::kDynamicStrideOrOffset);
+                                      ShapedType::kDynamic);
   auto stridedLayout = StridedLayoutAttr::get(tensorType.getContext(),
                                               dynamicOffset, dynamicStrides);
   return MemRefType::get(rankedTensorType.getShape(),
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
index a1aa8db..e217d20 100644
@@ -46,8 +46,7 @@ mlir::bufferization::castOrReallocMemRefValue(OpBuilder &b, Value value,
         failed(getStridesAndOffset(target, targetStrides, targetOffset)))
       return false;
     auto dynamicToStatic = [](int64_t a, int64_t b) {
-      return a == MemRefType::getDynamicStrideOrOffset() &&
-             b != MemRefType::getDynamicStrideOrOffset();
+      return ShapedType::isDynamic(a) && !ShapedType::isDynamic(b);
     };
     if (dynamicToStatic(sourceOffset, targetOffset))
       return false;
@@ -69,7 +68,7 @@ mlir::bufferization::castOrReallocMemRefValue(OpBuilder &b, Value value,
   auto loc = value.getLoc();
   SmallVector<Value, 4> dynamicOperands;
   for (int i = 0; i < destType.getRank(); ++i) {
-    if (destType.getShape()[i] != ShapedType::kDynamicSize)
+    if (destType.getShape()[i] != ShapedType::kDynamic)
       continue;
     auto index = b.createOrFold<arith::ConstantIndexOp>(loc, i);
     Value size = b.create<memref::DimOp>(loc, value, index);
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferResultsToOutParams.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferResultsToOutParams.cpp
index bff3b66..f7430d2 100644
@@ -28,9 +28,9 @@ static bool hasFullyDynamicLayoutMap(MemRefType type) {
   SmallVector<int64_t, 4> strides;
   if (failed(getStridesAndOffset(type, strides, offset)))
     return false;
-  if (!llvm::all_of(strides, ShapedType::isDynamicStrideOrOffset))
+  if (!llvm::all_of(strides, ShapedType::isDynamic))
     return false;
-  if (!ShapedType::isDynamicStrideOrOffset(offset))
+  if (!ShapedType::isDynamic(offset))
     return false;
   return true;
 }
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
index fefaf6e..6054dc4 100644
@@ -147,7 +147,7 @@ static Type parseVectorType(AsmParser &parser) {
 
   // We parsed a generic dimension list, but vectors only support two forms:
   //  - single non-dynamic entry in the list (fixed vector);
-  //  - two elements, the first dynamic (indicated by ShapedType::kDynamicSize)
+  //  - two elements, the first dynamic (indicated by ShapedType::kDynamic)
   //  and the second
   //    non-dynamic (scalable vector).
   if (dims.empty() || dims.size() > 2 ||
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index 0854761..26b63a9 100644
@@ -1046,7 +1046,7 @@ ParseResult SplitOp::parse(OpAsmParser &parser, OperationState &result) {
     }
 
     staticSplitPoint =
-        parser.getBuilder().getI64IntegerAttr(ShapedType::kDynamicSize);
+        parser.getBuilder().getI64IntegerAttr(ShapedType::kDynamic);
   }
 
   result.addAttribute(
@@ -1062,7 +1062,7 @@ ParseResult SplitOp::parse(OpAsmParser &parser, OperationState &result) {
 void SplitOp::print(OpAsmPrinter &printer) {
   printer << " " << getTarget() << " after ";
   int64_t staticSplitSize = static_cast<int64_t>(getStaticSplitPoint());
-  if (staticSplitSize != ShapedType::kDynamicSize)
+  if (staticSplitSize != ShapedType::kDynamic)
     printer << staticSplitSize;
   else
     printer << getDynamicSplitPoint();
@@ -1073,7 +1073,7 @@ void SplitOp::print(OpAsmPrinter &printer) {
 
 LogicalResult SplitOp::verify() {
   if ((static_cast<int64_t>(getStaticSplitPoint()) !=
-       ShapedType::kDynamicSize) ^
+       ShapedType::kDynamic) ^
       (getDynamicSplitPoint() == nullptr)) {
     return emitOpError() << "expects either a dynamic or a static split "
                             "point to be provided";
@@ -1304,7 +1304,7 @@ SmallVector<OpFoldResult> transform::TileOp::getMixedSizes() {
   unsigned dynamicPos = 0;
   Builder builder(getContext());
   for (int64_t size : tileSizes) {
-    if (size == ShapedType::kDynamicSize) {
+    if (size == ShapedType::kDynamic) {
       results.push_back(dynamic[dynamicPos++]);
     } else {
       results.push_back(builder.getIndexAttr(size));
@@ -1322,7 +1322,7 @@ ParseResult transform::TileOp::parse(OpAsmParser &parser,
   if (parser.parseOperand(target) ||
       parser.resolveOperand(target, pdlOperationType, result.operands) ||
       parseDynamicIndexList(parser, dynamicSizes, staticSizes,
-                            ShapedType::kDynamicSize) ||
+                            ShapedType::kDynamic) ||
       parser.resolveOperands(dynamicSizes, pdlOperationType, result.operands) ||
       parser.parseOptionalAttrDict(result.attributes))
     return ParseResult::failure();
@@ -1337,7 +1337,7 @@ ParseResult transform::TileOp::parse(OpAsmParser &parser,
 void TileOp::print(OpAsmPrinter &p) {
   p << ' ' << getTarget();
   printDynamicIndexList(p, getOperation(), getDynamicSizes(), getStaticSizes(),
-                        ShapedType::kDynamicSize);
+                        ShapedType::kDynamic);
   p.printOptionalAttrDict((*this)->getAttrs(), {getStaticSizesAttrName()});
 }
 
@@ -1375,7 +1375,7 @@ void transform::TileToForeachThreadOp::build(
   SmallVector<int64_t> staticTileSizes;
   SmallVector<Value> dynamicTileSizes;
   dispatchIndexOpFoldResults(mixedTileSizes, dynamicTileSizes, staticTileSizes,
-                             ShapedType::kDynamicSize);
+                             ShapedType::kDynamic);
   // Call the default builder which sets up the proper operands segment sizes
   // attributes for multiple variadic operands. In the absence of this, horrible
   // bugs ensue.
@@ -1410,7 +1410,7 @@ void transform::TileToForeachThreadOp::build(
   SmallVector<int64_t> staticNumThreads;
   SmallVector<Value> dynamicNumThreads;
   dispatchIndexOpFoldResults(mixedNumThreads, dynamicNumThreads,
-                             staticNumThreads, ShapedType::kDynamicSize);
+                             staticNumThreads, ShapedType::kDynamic);
   // Call the default builder which sets up the proper operands segment sizes
   // attributes for multiple variadic operands. In the absence of this, horrible
   // bugs ensue.
@@ -1663,7 +1663,7 @@ SmallVector<OpFoldResult> transform::TileToScfForOp::getMixedSizes() {
   unsigned dynamicPos = 0;
   Builder builder(getContext());
   for (int64_t size : tileSizes) {
-    if (size == ShapedType::kDynamicSize) {
+    if (size == ShapedType::kDynamic) {
       results.push_back(dynamic[dynamicPos++]);
     } else {
       results.push_back(builder.getIndexAttr(size));
@@ -1681,7 +1681,7 @@ ParseResult transform::TileToScfForOp::parse(OpAsmParser &parser,
   if (parser.parseOperand(target) ||
       parser.resolveOperand(target, pdlOperationType, result.operands) ||
       parseDynamicIndexList(parser, dynamicSizes, staticSizes,
-                            ShapedType::kDynamicSize) ||
+                            ShapedType::kDynamic) ||
       parser.resolveOperands(dynamicSizes, pdlOperationType, result.operands) ||
       parser.parseOptionalAttrDict(result.attributes))
     return ParseResult::failure();
@@ -1696,7 +1696,7 @@ ParseResult transform::TileToScfForOp::parse(OpAsmParser &parser,
 void TileToScfForOp::print(OpAsmPrinter &p) {
   p << ' ' << getTarget();
   printDynamicIndexList(p, getOperation(), getDynamicSizes(), getStaticSizes(),
-                        ShapedType::kDynamicSize);
+                        ShapedType::kDynamic);
   p.printOptionalAttrDict((*this)->getAttrs(), {getStaticSizesAttrName()});
 }
 
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
index b486708..bdc7bac 100644
@@ -1706,7 +1706,7 @@ struct RemoveOutsDependency : public OpRewritePattern<GenericOp> {
         modifiedOutput = true;
         SmallVector<Value> dynamicDims;
         for (const auto &dim : llvm::enumerate(operandType.getShape())) {
-          if (dim.value() != ShapedType::kDynamicSize)
+          if (dim.value() != ShapedType::kDynamic)
             continue;
           dynamicDims.push_back(rewriter.createOrFold<tensor::DimOp>(
               loc, operandVal, dim.index()));
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
index 2d51b8d..49bf3b3 100644
@@ -156,10 +156,10 @@ static LinalgOp fuse(OpBuilder &b, LinalgOp producer,
       continue;
     unsigned rank = tensorType.getRank();
     SmallVector<int64_t, 4> staticOffsetsVector(
-        rank, ShapedType::kDynamicStrideOrOffset);
-    SmallVector<int64_t, 4> staticSizesVector(rank, ShapedType::kDynamicSize);
+        rank, ShapedType::kDynamic);
+    SmallVector<int64_t, 4> staticSizesVector(rank, ShapedType::kDynamic);
     SmallVector<int64_t, 4> staticStridesVector(
-        rank, ShapedType::kDynamicStrideOrOffset);
+        rank, ShapedType::kDynamic);
     resultTypes.push_back(tensor::ExtractSliceOp::inferResultType(
         tensorType, staticOffsetsVector, staticSizesVector,
         staticStridesVector));
diff --git a/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp b/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp
index baeb5c2..bff2d54 100644
@@ -425,7 +425,7 @@ FailureOr<Value> mlir::linalg::hoistPaddingOnTensors(
 
   // Create the packed tensor<?x?x..?xtransposedShape> into which we amortize
   // padding.
-  SmallVector<int64_t> packedShape(nPackedLoops, ShapedType::kDynamicSize);
+  SmallVector<int64_t> packedShape(nPackedLoops, ShapedType::kDynamic);
   // TODO: go grab dims when necessary, for now tensor::PadOp returns a static
   // tensor.
   llvm::append_range(packedShape, transposedTensorType->getShape());
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
index 1d966b3..5b623a9 100644
@@ -65,7 +65,7 @@ static Value allocBuffer(ImplicitLocOpBuilder &b,
 
   // Fallback dynamic buffer.
   auto dynamicBufferType =
-      MemRefType::get(ShapedType::kDynamicSize, b.getIntegerType(8));
+      MemRefType::get(ShapedType::kDynamic, b.getIntegerType(8));
   Value mul = b.createOrFold<arith::MulIOp>(
       b.create<arith::ConstantIndexOp>(width), allocSize);
   if (options.useAlloca)
@@ -93,7 +93,7 @@ defaultAllocBufferCallBack(const LinalgPromotionOptions &options,
   Value buffer = allocBuffer(b, options, viewType.getElementType(), allocSize,
                              layout, alignment);
   SmallVector<int64_t, 4> dynSizes(boundingSubViewSize.size(),
-                                   ShapedType::kDynamicSize);
+                                   ShapedType::kDynamic);
   Value view = b.createOrFold<memref::ViewOp>(
       MemRefType::get(dynSizes, viewType.getElementType()), buffer, zero,
       boundingSubViewSize);
@@ -243,7 +243,7 @@ FailureOr<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
     partialSizes.push_back(
         b.createOrFold<memref::DimOp>(loc, subView, resultDimIdx++));
   }
-  SmallVector<int64_t, 4> dynSizes(fullSizes.size(), ShapedType::kDynamicSize);
+  SmallVector<int64_t, 4> dynSizes(fullSizes.size(), ShapedType::kDynamic);
   // If a callback is not specified, then use the default implementation for
   // allocating the promoted buffer.
   Optional<Value> fullLocalView = allocationFn(b, subView, fullSizes, layout);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp b/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp
index 26a49b9..efa6b1f 100644
@@ -44,7 +44,7 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReduction(
   unsigned reductionDim = dims[0];
   SmallVector<int64_t, 4> loopRanges = op.getStaticLoopRanges();
   int64_t reductionDimSize = loopRanges[reductionDim];
-  if (reductionDimSize == ShapedType::kDynamicSize ||
+  if (reductionDimSize == ShapedType::kDynamic ||
       reductionDimSize % ratio != 0)
     return b.notifyMatchFailure(
         op, "Reduction dimension not divisible by split ratio");
@@ -253,7 +253,7 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReductionByScaling(
   unsigned reductionDimPos = dims[0];
   SmallVector<int64_t> loopRanges = op.getStaticLoopRanges();
   int64_t reductionDimSize = loopRanges[reductionDimPos];
-  if (reductionDimSize == ShapedType::kDynamicSize ||
+  if (reductionDimSize == ShapedType::kDynamic ||
       reductionDimSize % splitFactor != 0 ||
       insertSplitDimension >= loopRanges.size())
     return b.notifyMatchFailure(
diff --git a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
index 02f4e9d..256ede9 100644
@@ -280,7 +280,7 @@ struct LinalgOpPartialReductionInterface
     for (int64_t idx : llvm::seq<int64_t>(0, oldShape.size() + 1)) {
       if (idx == insertSplitDimension) {
         dispatchIndexOpFoldResults(sizes[idx], dynamicDims, newOutputShape,
-                                   ShapedType::kDynamicStrideOrOffset);
+                                   ShapedType::kDynamic);
         continue;
       }
       int64_t oldIdx = idx < insertSplitDimension ? idx : idx - 1;
diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
index fc34353..712d4c2 100644
@@ -218,7 +218,7 @@ SmallVector<Value, 4> getDynOperands(Location loc, Value val, OpBuilder &b) {
   SmallVector<Value, 4> dynOperands;
   auto shapedType = val.getType().cast<ShapedType>();
   for (const auto &dim : llvm::enumerate(shapedType.getShape())) {
-    if (dim.value() == ShapedType::kDynamicSize)
+    if (dim.value() == ShapedType::kDynamic)
       dynOperands.push_back(createOrFoldDimOp(b, loc, val, dim.index()));
   }
   return dynOperands;
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
index 73c2b53..bf54d46 100644
@@ -31,22 +31,22 @@ namespace {
 namespace saturated_arith {
 struct Wrapper {
   static Wrapper stride(int64_t v) {
-    return (ShapedType::isDynamicStrideOrOffset(v)) ? Wrapper{true, 0}
+    return (ShapedType::isDynamic(v)) ? Wrapper{true, 0}
                                                     : Wrapper{false, v};
   }
   static Wrapper offset(int64_t v) {
-    return (ShapedType::isDynamicStrideOrOffset(v)) ? Wrapper{true, 0}
+    return (ShapedType::isDynamic(v)) ? Wrapper{true, 0}
                                                     : Wrapper{false, v};
   }
   static Wrapper size(int64_t v) {
     return (ShapedType::isDynamic(v)) ? Wrapper{true, 0} : Wrapper{false, v};
   }
   int64_t asOffset() {
-    return saturated ? ShapedType::kDynamicStrideOrOffset : v;
+    return saturated ? ShapedType::kDynamic : v;
   }
-  int64_t asSize() { return saturated ? ShapedType::kDynamicSize : v; }
+  int64_t asSize() { return saturated ? ShapedType::kDynamic : v; }
   int64_t asStride() {
-    return saturated ? ShapedType::kDynamicStrideOrOffset : v;
+    return saturated ? ShapedType::kDynamic : v;
   }
   bool operator==(Wrapper other) {
     return (saturated && other.saturated) ||
@@ -136,7 +136,7 @@ Type mlir::memref::getTensorTypeFromMemRefType(Type type) {
 /// - `memRefTy == memref<?x?xf32, strided<[?, 1], offset: ?>>`
 /// - `getAttributes == getConstantStrides` (i.e., a wrapper around
 /// `getStridesAndOffset`), and
-/// - `isDynamic == isDynamicStrideOrOffset`
+/// - `isDynamic == ShapedType::isDynamic`
 /// Will yield: `values == [2, 1]`
 static void constifyIndexValues(
     SmallVectorImpl<OpFoldResult> &values, MemRefType memRefTy,
@@ -296,7 +296,7 @@ struct SimplifyAllocConst : public OpRewritePattern<AllocLikeOp> {
         newShapeConstants.push_back(constantIndexOp.value());
       } else {
         // Dynamic shape dimension not folded; copy dynamicSize from old memref.
-        newShapeConstants.push_back(ShapedType::kDynamicSize);
+        newShapeConstants.push_back(ShapedType::kDynamic);
         dynamicSizes.push_back(dynamicSize);
       }
       dynamicDimPos++;
@@ -705,16 +705,16 @@ bool CastOp::canFoldIntoConsumerOp(CastOp castOp) {
 
   // If cast is towards more static offset along any dimension, don't fold.
   if (sourceOffset != resultOffset)
-    if (ShapedType::isDynamicStrideOrOffset(sourceOffset) &&
-        !ShapedType::isDynamicStrideOrOffset(resultOffset))
+    if (ShapedType::isDynamic(sourceOffset) &&
+        !ShapedType::isDynamic(resultOffset))
       return false;
 
   // If cast is towards more static strides along any dimension, don't fold.
   for (auto it : llvm::zip(sourceStrides, resultStrides)) {
     auto ss = std::get<0>(it), st = std::get<1>(it);
     if (ss != st)
-      if (ShapedType::isDynamicStrideOrOffset(ss) &&
-          !ShapedType::isDynamicStrideOrOffset(st))
+      if (ShapedType::isDynamic(ss) &&
+          !ShapedType::isDynamic(st))
         return false;
   }
 
@@ -747,8 +747,8 @@ bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
       // same. They are also compatible if either one is dynamic (see
       // description of MemRefCastOp for details).
       auto checkCompatible = [](int64_t a, int64_t b) {
-        return (a == MemRefType::getDynamicStrideOrOffset() ||
-                b == MemRefType::getDynamicStrideOrOffset() || a == b);
+        return (ShapedType::isDynamic(a) ||
+                ShapedType::isDynamic(b) || a == b);
       };
       if (!checkCompatible(aOffset, bOffset))
         return false;
@@ -1445,7 +1445,7 @@ SmallVector<OpFoldResult>
 ExtractStridedMetadataOp::getConstifiedMixedStrides() {
   SmallVector<OpFoldResult> values = getAsOpFoldResult(getStrides());
   constifyIndexValues(values, getSource().getType(), getContext(),
-                      getConstantStrides, ShapedType::isDynamicStrideOrOffset);
+                      getConstantStrides, ShapedType::isDynamic);
   return values;
 }
 
@@ -1453,7 +1453,7 @@ OpFoldResult ExtractStridedMetadataOp::getConstifiedMixedOffset() {
   OpFoldResult offsetOfr = getAsOpFoldResult(getOffset());
   SmallVector<OpFoldResult> values(1, offsetOfr);
   constifyIndexValues(values, getSource().getType(), getContext(),
-                      getConstantOffset, ShapedType::isDynamicStrideOrOffset);
+                      getConstantOffset, ShapedType::isDynamic);
   return values[0];
 }
 
@@ -1772,11 +1772,11 @@ void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
   dispatchIndexOpFoldResults(offset, dynamicOffsets, staticOffsets,
-                             ShapedType::kDynamicStrideOrOffset);
+                             ShapedType::kDynamic);
   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
-                             ShapedType::kDynamicSize);
+                             ShapedType::kDynamic);
   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
-                             ShapedType::kDynamicStrideOrOffset);
+                             ShapedType::kDynamic);
   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
@@ -1847,8 +1847,8 @@ LogicalResult ReinterpretCastOp::verify() {
 
   // Match offset in result memref type and in static_offsets attribute.
   int64_t expectedOffset = extractFromI64ArrayAttr(getStaticOffsets()).front();
-  if (!ShapedType::isDynamicStrideOrOffset(resultOffset) &&
-      !ShapedType::isDynamicStrideOrOffset(expectedOffset) &&
+  if (!ShapedType::isDynamic(resultOffset) &&
+      !ShapedType::isDynamic(expectedOffset) &&
       resultOffset != expectedOffset)
     return emitError("expected result type with offset = ")
            << resultOffset << " instead of " << expectedOffset;
@@ -1858,8 +1858,8 @@ LogicalResult ReinterpretCastOp::verify() {
            resultStrides, extractFromI64ArrayAttr(getStaticStrides())))) {
     int64_t resultStride = std::get<0>(en.value());
     int64_t expectedStride = std::get<1>(en.value());
-    if (!ShapedType::isDynamicStrideOrOffset(resultStride) &&
-        !ShapedType::isDynamicStrideOrOffset(expectedStride) &&
+    if (!ShapedType::isDynamic(resultStride) &&
+        !ShapedType::isDynamic(expectedStride) &&
         resultStride != expectedStride)
       return emitError("expected result type with stride = ")
              << expectedStride << " instead of " << resultStride
@@ -1909,7 +1909,7 @@ SmallVector<OpFoldResult> ReinterpretCastOp::getConstifiedMixedSizes() {
 SmallVector<OpFoldResult> ReinterpretCastOp::getConstifiedMixedStrides() {
   SmallVector<OpFoldResult> values = getMixedStrides();
   constifyIndexValues(values, getType(), getContext(), getConstantStrides,
-                      ShapedType::isDynamicStrideOrOffset);
+                      ShapedType::isDynamic);
   return values;
 }
 
@@ -1918,7 +1918,7 @@ OpFoldResult ReinterpretCastOp::getConstifiedMixedOffset() {
   assert(values.size() == 1 &&
          "reinterpret_cast must have one and only one offset");
   constifyIndexValues(values, getType(), getContext(), getConstantOffset,
-                      ShapedType::isDynamicStrideOrOffset);
+                      ShapedType::isDynamic);
   return values[0];
 }
 
@@ -2284,7 +2284,7 @@ computeCollapsedLayoutMap(MemRefType srcType,
       // the corresponding stride may have to be skipped. (See above comment.)
       // Therefore, the result stride cannot be statically determined and must
       // be dynamic.
-      resultStrides.push_back(ShapedType::kDynamicStrideOrOffset);
+      resultStrides.push_back(ShapedType::kDynamic);
     }
   }
 
@@ -2481,7 +2481,7 @@ LogicalResult ReshapeOp::verify() {
   if (resultMemRefType) {
     if (!resultMemRefType.getLayout().isIdentity())
       return emitOpError("result memref type should have identity affine map");
-    if (shapeSize == ShapedType::kDynamicSize)
+    if (shapeSize == ShapedType::kDynamic)
       return emitOpError("cannot use shape operand with dynamic length to "
                          "reshape to statically-ranked memref type");
     if (shapeSize != resultMemRefType.getRank())
@@ -2575,11 +2575,11 @@ Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
-                             ShapedType::kDynamicStrideOrOffset);
+                             ShapedType::kDynamic);
   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
-                             ShapedType::kDynamicSize);
+                             ShapedType::kDynamic);
   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
-                             ShapedType::kDynamicStrideOrOffset);
+                             ShapedType::kDynamic);
   return SubViewOp::inferResultType(sourceMemRefType, staticOffsets,
                                     staticSizes, staticStrides);
 }
@@ -2625,11 +2625,11 @@ Type SubViewOp::inferRankReducedResultType(ArrayRef<int64_t> resultShape,
   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
-                             ShapedType::kDynamicStrideOrOffset);
+                             ShapedType::kDynamic);
   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
-                             ShapedType::kDynamicSize);
+                             ShapedType::kDynamic);
   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
-                             ShapedType::kDynamicStrideOrOffset);
+                             ShapedType::kDynamic);
   return SubViewOp::inferRankReducedResultType(
       resultShape, sourceRankedTensorType, staticOffsets, staticSizes,
       staticStrides);
@@ -2646,11 +2646,11 @@ void SubViewOp::build(OpBuilder &b, OperationState &result,
   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
-                             ShapedType::kDynamicStrideOrOffset);
+                             ShapedType::kDynamic);
   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
-                             ShapedType::kDynamicSize);
+                             ShapedType::kDynamic);
   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
-                             ShapedType::kDynamicStrideOrOffset);
+                             ShapedType::kDynamic);
   auto sourceMemRefType = source.getType().cast<MemRefType>();
   // Structuring implementation this way avoids duplication between builders.
   if (!resultType) {
index bbf8357..9d2713f 100644 (file)
@@ -81,7 +81,7 @@ public:
 
     detail::bindSymbolsList(rewriter.getContext(), symbols);
     AffineExpr expr = symbols.front();
-    values[0] = ShapedType::isDynamicStrideOrOffset(sourceOffset)
+    values[0] = ShapedType::isDynamic(sourceOffset)
                     ? getAsOpFoldResult(newExtractStridedMetadata.getOffset())
                     : rewriter.getIndexAttr(sourceOffset);
     SmallVector<OpFoldResult> subOffsets = subview.getMixedOffsets();
@@ -91,7 +91,7 @@ public:
     for (unsigned i = 0; i < sourceRank; ++i) {
       // Compute the stride.
       OpFoldResult origStride =
-          ShapedType::isDynamicStrideOrOffset(sourceStrides[i])
+          ShapedType::isDynamic(sourceStrides[i])
               ? origStrides[i]
               : OpFoldResult(rewriter.getIndexAttr(sourceStrides[i]));
       strides.push_back(makeComposedFoldedAffineApply(
@@ -273,7 +273,7 @@ SmallVector<OpFoldResult> getExpandedStrides(memref::ExpandShapeOp expandShape,
          "getStridesAndOffset must work on valid expand_shape");
 
   OpFoldResult origStride =
-      ShapedType::isDynamicStrideOrOffset(strides[groupId])
+      ShapedType::isDynamic(strides[groupId])
           ? origStrides[groupId]
           : builder.getIndexAttr(strides[groupId]);
 
@@ -425,7 +425,7 @@ getCollapsedStride(memref::CollapseShapeOp collapseShape, OpBuilder &builder,
   int64_t innerMostDimForGroup = reassocGroup.back();
   int64_t innerMostStrideForGroup = strides[innerMostDimForGroup];
   collapsedStride.push_back(
-      ShapedType::isDynamicStrideOrOffset(innerMostStrideForGroup)
+      ShapedType::isDynamic(innerMostStrideForGroup)
           ? origStrides[innerMostDimForGroup]
           : builder.getIndexAttr(innerMostStrideForGroup));
 
@@ -483,7 +483,7 @@ public:
     unsigned reshapeRank = reshapeType.getRank();
 
     OpFoldResult offsetOfr =
-        ShapedType::isDynamicStrideOrOffset(offset)
+        ShapedType::isDynamic(offset)
             ? getAsOpFoldResult(newExtractStridedMetadata.getOffset())
             : rewriter.getIndexAttr(offset);
 
index 286ff0b..5d931ac 100644 (file)
@@ -171,9 +171,9 @@ static Optional<int64_t> getTypeNumBytes(const SPIRVConversionOptions &options,
       return elementSize;
 
     auto dims = memRefType.getShape();
-    if (llvm::is_contained(dims, ShapedType::kDynamicSize) ||
-        offset == MemRefType::getDynamicStrideOrOffset() ||
-        llvm::is_contained(strides, MemRefType::getDynamicStrideOrOffset()))
+    if (llvm::is_contained(dims, ShapedType::kDynamic) ||
+        ShapedType::isDynamic(offset) ||
+        llvm::is_contained(strides, ShapedType::kDynamic))
       return llvm::None;
 
     int64_t memrefSize = -1;
@@ -749,8 +749,8 @@ Value mlir::spirv::getVulkanElementPtr(SPIRVTypeConverter &typeConverter,
   int64_t offset;
   SmallVector<int64_t, 4> strides;
   if (failed(getStridesAndOffset(baseType, strides, offset)) ||
-      llvm::is_contained(strides, MemRefType::getDynamicStrideOrOffset()) ||
-      offset == MemRefType::getDynamicStrideOrOffset()) {
+      llvm::is_contained(strides, ShapedType::kDynamic) ||
+      ShapedType::isDynamic(offset)) {
     return nullptr;
   }
 
@@ -780,8 +780,8 @@ Value mlir::spirv::getOpenCLElementPtr(SPIRVTypeConverter &typeConverter,
   int64_t offset;
   SmallVector<int64_t, 4> strides;
   if (failed(getStridesAndOffset(baseType, strides, offset)) ||
-      llvm::is_contained(strides, MemRefType::getDynamicStrideOrOffset()) ||
-      offset == MemRefType::getDynamicStrideOrOffset()) {
+      llvm::is_contained(strides, ShapedType::kDynamic) ||
+      ShapedType::isDynamic(offset)) {
     return nullptr;
   }
 
index 0fd8d2d..5c1a9fa 100644 (file)
@@ -1763,7 +1763,7 @@ LogicalResult mlir::shape::ShapeOfOp::inferReturnTypes(
   else {
     auto shapedTy = operands[0].getType().cast<ShapedType>();
     int64_t rank =
-        shapedTy.hasRank() ? shapedTy.getRank() : ShapedType::kDynamicSize;
+        shapedTy.hasRank() ? shapedTy.getRank() : ShapedType::kDynamic;
     Type indexTy = IndexType::get(context);
     Type extentTensorTy = RankedTensorType::get({rank}, indexTy);
     inferredReturnTypes.assign({extentTensorTy});
index 36b45de..7ee988b 100644 (file)
@@ -345,7 +345,7 @@ LogicalResult ConvertOp::verify() {
       // (e.g. 10 vs. 10, 10 vs. ?, or ? vs. ?), but reject direct mismatches or
       // matches that would need a runtime assert (e.g. 10 vs. 20 or ? vs. 10).
       for (unsigned d = 0, rank = tp1.getRank(); d < rank; d++)
-        if (shape1[d] != shape2[d] && shape2[d] != ShapedType::kDynamicSize)
+        if (shape1[d] != shape2[d] && shape2[d] != ShapedType::kDynamic)
           return emitError("unexpected conversion mismatch in dimension ") << d;
       return success();
     }
@@ -499,7 +499,7 @@ LogicalResult ConcatenateOp::verify() {
   for (auto type : getInputs().getTypes()) {
     auto shape = type.cast<RankedTensorType>().getShape();
     for (auto dim : shape) {
-      if (dim == ShapedType::kDynamicSize)
+      if (ShapedType::isDynamic(dim))
         return emitError("Only statically-sized input tensors are supported.");
     }
   }
@@ -522,7 +522,7 @@ LogicalResult ConcatenateOp::verify() {
   for (unsigned i = 0; i < rank; i++) {
     auto dstDim = dstTp.getShape()[i];
     if (i == concatDim) {
-      if (dstDim != ShapedType::kDynamicSize) {
+      if (!ShapedType::isDynamic(dstDim)) {
         unsigned sumDim = 0;
         for (auto src : getInputs()) {
           // If we reach here, all inputs should have static shapes.
@@ -540,7 +540,7 @@ LogicalResult ConcatenateOp::verify() {
       int64_t prev = dstDim;
       for (auto src : getInputs()) {
         auto d = src.getType().cast<RankedTensorType>().getShape()[i];
-        if (prev != ShapedType::kDynamicSize && d != prev)
+        if (!ShapedType::isDynamic(prev) && d != prev)
           return emitError("All dimensions (expect for the concatenating one) "
                            "should be equal.");
         prev = d;
@@ -701,7 +701,7 @@ LogicalResult SortOp::verify() {
       int64_t dim = mtp.getShape()[0];
       // We can't check the size of dynamic dimension at compile-time, but all
       // xs and ys should have a dimension not less than n at runtime.
-      if (n && dim != ShapedType::kDynamicSize && dim < n.value())
+      if (n && !ShapedType::isDynamic(dim) && dim < n.value())
         return emitError(llvm::formatv("xs and ys need to have a dimension >= n"
                                        ": {0} < {1}",
                                        dim, n.value()));
@@ -744,7 +744,7 @@ LogicalResult SortCooOp::verify() {
   auto checkDim = [&](Value v, uint64_t min, const char *message) {
     MemRefType tp = v.getType().cast<MemRefType>();
     int64_t dim = tp.getShape()[0];
-    if (dim != ShapedType::kDynamicSize && dim < (int64_t)min) {
+    if (!ShapedType::isDynamic(dim) && dim < (int64_t)min) {
       emitError(llvm::formatv("{0} got {1} < {2}", message, dim, min));
     }
   };
index 531acc6..4460c18 100644 (file)
@@ -152,7 +152,7 @@ void SparseTensorLoopEmitter::initializeLoopEmit(
     auto rank = rtp.getRank();
     auto shape = rtp.getShape();
     auto enc = getSparseTensorEncoding(rtp);
-    auto dynShape = {ShapedType::kDynamicSize};
+    auto dynShape = {ShapedType::kDynamic};
     // Scan all dimensions of current tensor.
     for (int64_t d = 0; d < rank; d++) {
       // This should be called only once at beginning.
@@ -203,7 +203,7 @@ void SparseTensorLoopEmitter::initializeLoopEmit(
     } else {
       // Annotated sparse tensors.
       // We also need the value buffer for annotated all dense `sparse` tensor.
-      auto dynShape = {ShapedType::kDynamicSize};
+      auto dynShape = {ShapedType::kDynamic};
       auto sparseTp = MemRefType::get(dynShape, elementType);
       valBuffer[t] = builder.create<ToValuesOp>(loc, sparseTp, tensor);
     }
@@ -846,12 +846,12 @@ void mlir::sparse_tensor::genReshapeDstShape(
       // expanded from the i-th dimension in srcShape.
       // For example, if srcDim = 8, then the expanded shape could be <2x?x2>,
       // but not <2x?x?>.
-      if (staticDstShape[j] == ShapedType::kDynamicSize) {
+      if (staticDstShape[j] == ShapedType::kDynamic) {
         // The expanded dimension has dynamic size. We compute the dimension
         // by dividing srcDim by the product of the static dimensions.
         int64_t product = 1;
         for (unsigned k = start; k < start + map.size(); k++) {
-          if (staticDstShape[k] != ShapedType::kDynamicSize) {
+          if (staticDstShape[k] != ShapedType::kDynamic) {
             product *= staticDstShape[k];
           }
         }
@@ -959,7 +959,7 @@ Value mlir::sparse_tensor::genAlloca(OpBuilder &builder, Location loc,
 
 Value mlir::sparse_tensor::genAlloca(OpBuilder &builder, Location loc, Value sz,
                                      Type tp) {
-  auto memTp = MemRefType::get({ShapedType::kDynamicSize}, tp);
+  auto memTp = MemRefType::get({ShapedType::kDynamic}, tp);
   return builder.create<memref::AllocaOp>(loc, memTp, ValueRange{sz});
 }
 
@@ -976,7 +976,7 @@ Value mlir::sparse_tensor::allocDenseTensor(OpBuilder &builder, Location loc,
   auto memTp = MemRefType::get(shape, elemTp);
   SmallVector<Value> dynamicSizes;
   for (unsigned i = 0, rank = tensorTp.getRank(); i < rank; i++) {
-    if (shape[i] == ShapedType::kDynamicSize)
+    if (shape[i] == ShapedType::kDynamic)
       dynamicSizes.push_back(sizes[i]);
   }
   Value mem = builder.create<memref::AllocOp>(loc, memTp, dynamicSizes);
index b5e9826..0592009 100644 (file)
@@ -722,7 +722,7 @@ LogicalResult matchAndRewriteSortOp(OpTy op, ValueRange xys, uint64_t nx,
     auto mtp = v.getType().cast<MemRefType>();
     if (!mtp.isDynamicDim(0)) {
       auto newMtp =
-          MemRefType::get({ShapedType::kDynamicSize}, mtp.getElementType());
+          MemRefType::get({ShapedType::kDynamic}, mtp.getElementType());
       v = rewriter.create<memref::CastOp>(loc, newMtp, v);
     }
     operands.push_back(v);
@@ -786,7 +786,7 @@ public:
 
       Value c2 = constantIndex(rewriter, loc, 2);
       auto bufferType =
-          MemRefType::get({ShapedType::kDynamicSize}, value.getType());
+          MemRefType::get({ShapedType::kDynamic}, value.getType());
       scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, bufferType, cond,
                                                   /*else=*/true);
       // True branch.
index 910b744..775b20d 100644 (file)
@@ -237,16 +237,16 @@ convertSparseTensorType(Type type, SmallVectorImpl<Type> &fields) {
     // order. Clients of this type know what field is what from the sparse
     // tensor type.
     if (isCompressedDim(rType, r)) {
-      fields.push_back(MemRefType::get({ShapedType::kDynamicSize}, ptrType));
-      fields.push_back(MemRefType::get({ShapedType::kDynamicSize}, idxType));
+      fields.push_back(MemRefType::get({ShapedType::kDynamic}, ptrType));
+      fields.push_back(MemRefType::get({ShapedType::kDynamic}, idxType));
     } else if (isSingletonDim(rType, r)) {
-      fields.push_back(MemRefType::get({ShapedType::kDynamicSize}, idxType));
+      fields.push_back(MemRefType::get({ShapedType::kDynamic}, idxType));
     } else {
       assert(isDenseDim(rType, r)); // no fields
     }
   }
   // The values array.
-  fields.push_back(MemRefType::get({ShapedType::kDynamicSize}, eltType));
+  fields.push_back(MemRefType::get({ShapedType::kDynamic}, eltType));
   assert(fields.size() == lastField);
   return success();
 }
@@ -288,7 +288,7 @@ static void allocSchemeForRank(OpBuilder &builder, Location loc,
 /// Creates allocation operation.
 static Value createAllocation(OpBuilder &builder, Location loc, Type type,
                               Value sz, bool enableInit) {
-  auto memType = MemRefType::get({ShapedType::kDynamicSize}, type);
+  auto memType = MemRefType::get({ShapedType::kDynamic}, type);
   Value buffer = builder.create<memref::AllocOp>(loc, memType, sz);
   if (enableInit) {
     Value fillValue =
@@ -792,7 +792,7 @@ public:
     assert(sz); // This for sure is a sparse tensor
     // Generate a memref for `sz` elements of type `t`.
     auto genAlloc = [&](Type t) {
-      auto memTp = MemRefType::get({ShapedType::kDynamicSize}, t);
+      auto memTp = MemRefType::get({ShapedType::kDynamic}, t);
       return rewriter.create<memref::AllocOp>(loc, memTp, ValueRange{*sz});
     };
     // Allocate temporary buffers for values/filled-switch and added.
index 8c8bf73..7522e26 100644 (file)
@@ -85,7 +85,7 @@ static Value sizeFromPtrAtDim(OpBuilder &builder, Location loc,
                               SparseTensorEncodingAttr &enc, ShapedType stp,
                               Value src, unsigned i) {
   auto shape = stp.getShape();
-  if (shape[i] == ShapedType::kDynamicSize)
+  if (shape[i] == ShapedType::kDynamic)
     return genLvlSizeCall(builder, loc, enc, src, i);
   return constantIndex(builder, loc, shape[i]);
 }
@@ -104,7 +104,7 @@ static void sizesFromType(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
                           Location loc, ShapedType stp) {
   auto shape = stp.getShape();
   for (unsigned i = 0, rank = stp.getRank(); i < rank; i++) {
-    uint64_t s = shape[i] == ShapedType::kDynamicSize ? 0 : shape[i];
+    uint64_t s = shape[i] == ShapedType::kDynamic ? 0 : shape[i];
     sizes.push_back(constantIndex(builder, loc, s));
   }
 }
@@ -129,7 +129,7 @@ static void concatSizesFromInputs(OpBuilder &builder,
     sizesFromSrc(builder, sizes, loc, srcs[0]);
 
   // Sum up on the `dim` if the dimension is dynamic.
-  if (dstShape[dim] != ShapedType::kDynamicSize) {
+  if (dstShape[dim] != ShapedType::kDynamic) {
     // Faithfully take the static size.
     sizes[dim] = constantIndex(builder, loc, dstShape[dim]);
   } else {
@@ -151,7 +151,7 @@ static void concatSizesFromInputs(OpBuilder &builder,
 /// `memref<$sz x $tp>`). Unlike temporary buffers on the stack,
 /// this buffer must be explicitly deallocated by client.
 static Value genAlloc(RewriterBase &rewriter, Location loc, Value sz, Type tp) {
-  auto memTp = MemRefType::get({ShapedType::kDynamicSize}, tp);
+  auto memTp = MemRefType::get({ShapedType::kDynamic}, tp);
   return rewriter.create<memref::AllocOp>(loc, memTp, ValueRange{sz});
 }
 
@@ -1037,7 +1037,7 @@ public:
     Location loc = op.getLoc();
     // Query values array size for the actually stored values size.
     Type eltType = op.getTensor().getType().cast<ShapedType>().getElementType();
-    auto resTp = MemRefType::get({ShapedType::kDynamicSize}, eltType);
+    auto resTp = MemRefType::get({ShapedType::kDynamic}, eltType);
     Value values = genValuesCall(rewriter, loc, resTp, adaptor.getOperands());
     rewriter.replaceOpWithNewOp<memref::DimOp>(op, values,
                                                constantIndex(rewriter, loc, 0));
index 7d7f8c2..1472b67 100644 (file)
@@ -117,7 +117,7 @@ static void sizesForTensor(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
                            Location loc, ShapedType stp, Value tensor) {
   for (const auto &d : enumerate(stp.getShape())) {
     Value dim;
-    if (d.value() == ShapedType::kDynamicSize)
+    if (d.value() == ShapedType::kDynamic)
       dim = builder.create<tensor::DimOp>(loc, tensor, d.index());
     else
       dim = constantIndex(builder, loc, d.value());
@@ -165,7 +165,7 @@ static void getDynamicSizes(RankedTensorType tp,
                             const SmallVectorImpl<Value> &sizes,
                             SmallVectorImpl<Value> &dynSizes) {
   for (const auto &d : enumerate(tp.getShape())) {
-    if (d.value() == ShapedType::kDynamicSize)
+    if (d.value() == ShapedType::kDynamic)
       dynSizes.push_back(sizes[d.index()]);
   }
 }
@@ -375,7 +375,7 @@ public:
       genReshapeDstShape(loc, rewriter, dstSizes, srcSizes, dstShape,
                          op.getReassociationIndices());
       for (auto &d : llvm::enumerate(dstShape)) {
-        if (d.value() == ShapedType::kDynamicSize)
+        if (d.value() == ShapedType::kDynamic)
           dstDynSizes.push_back(dstSizes[d.index()]);
       }
     }
@@ -465,7 +465,7 @@ struct ConcatenateRewriter : public OpRewritePattern<ConcatenateOp> {
     if (!rtp.hasStaticShape()) {
       ArrayRef<int64_t> rShape = rtp.getShape();
       for (const auto &d : llvm::enumerate(rShape)) {
-        if (d.value() == ShapedType::kDynamicSize) {
+        if (d.value() == ShapedType::kDynamic) {
           Value v =
               createOrFoldDimOp(rewriter, loc, op.getOperand(0), d.index());
           rewriter.create<tensor::DimOp>(loc, op.getOperand(0), d.index());
@@ -705,7 +705,7 @@ private:
     // Sort the COO tensor so that its elements are ordered via increasing
     // indices for the storage ordering of the dst tensor.
     SparseTensorEncodingAttr encSrc = getSparseTensorEncoding(srcTp);
-    auto dynShape = {ShapedType::kDynamicSize};
+    auto dynShape = {ShapedType::kDynamic};
     auto indTp =
         MemRefType::get(dynShape, getIndexOverheadType(rewriter, encSrc));
     uint64_t rank = dstTp.getRank();
@@ -888,7 +888,7 @@ struct NewRewriter : public OpRewritePattern<NewOp> {
           .getResult(0);
       ArrayRef<int64_t> dstShape = dstTp.getShape();
       for (auto &d : llvm::enumerate(dstShape)) {
-        if (d.value() == ShapedType::kDynamicSize) {
+        if (d.value() == ShapedType::kDynamic) {
           dynSizesArray.push_back(rewriter.create<memref::LoadOp>(
               loc, dimSizes, constantIndex(rewriter, loc, d.index())));
         }
index 31d4cc7..801f33b 100644 (file)
@@ -836,7 +836,7 @@ static void genExpansion(Merger &merger, CodeGen &codegen, OpBuilder &builder,
   Value tensor = lhs->get();
   Location loc = op.getLoc();
   if (atStart) {
-    auto dynShape = {ShapedType::kDynamicSize};
+    auto dynShape = {ShapedType::kDynamic};
     Type etp = tensor.getType().cast<ShapedType>().getElementType();
     Type t1 = MemRefType::get(dynShape, etp);
     Type t2 = MemRefType::get(dynShape, builder.getI1Type());
index 31d892f..019cffe 100644 (file)
@@ -518,7 +518,7 @@ void EmptyOp::build(OpBuilder &builder, OperationState &result,
   SmallVector<int64_t> staticShape;
   SmallVector<Value> dynamicSizes;
   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticShape,
-                             ShapedType::kDynamicSize);
+                             ShapedType::kDynamic);
   build(builder, result, staticShape, elementType, dynamicSizes, encoding);
 }
 
@@ -1159,7 +1159,7 @@ struct StaticTensorGenerate : public OpRewritePattern<GenerateOp> {
       }
       APInt index;
       if (!matchPattern(*operandsIt, m_ConstantInt(&index))) {
-        newShape.push_back(ShapedType::kDynamicSize);
+        newShape.push_back(ShapedType::kDynamic);
         newOperands.push_back(*operandsIt++);
         continue;
       }
@@ -1333,8 +1333,8 @@ computeTensorReshapeCollapsedType(RankedTensorType type,
     unsigned dim = m.getNumResults();
     auto band = shape.slice(currentDim, dim);
     int64_t size = 1;
-    if (llvm::is_contained(band, ShapedType::kDynamicSize))
-      size = ShapedType::kDynamicSize;
+    if (llvm::is_contained(band, ShapedType::kDynamic))
+      size = ShapedType::kDynamic;
     else
       for (unsigned d = 0; d < dim; ++d)
         size *= shape[currentDim + d];
@@ -1526,11 +1526,11 @@ RankedTensorType ExtractSliceOp::inferResultType(
   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
-                             ShapedType::kDynamicStrideOrOffset);
+                             ShapedType::kDynamic);
   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
-                             ShapedType::kDynamicSize);
+                             ShapedType::kDynamic);
   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
-                             ShapedType::kDynamicStrideOrOffset);
+                             ShapedType::kDynamic);
   return ExtractSliceOp::inferResultType(sourceShapedTensorType, staticOffsets,
                                          staticSizes, staticStrides);
 }
@@ -1574,11 +1574,11 @@ RankedTensorType ExtractSliceOp::inferCanonicalRankReducedResultType(
   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
-                             ShapedType::kDynamicStrideOrOffset);
+                             ShapedType::kDynamic);
   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
-                             ShapedType::kDynamicSize);
+                             ShapedType::kDynamic);
   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
-                             ShapedType::kDynamicStrideOrOffset);
+                             ShapedType::kDynamic);
   return ExtractSliceOp::inferCanonicalRankReducedResultType(
       desiredResultRank, sourceRankedTensorType, staticOffsets, staticSizes,
       staticStrides);
@@ -1595,11 +1595,11 @@ void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
-                             ShapedType::kDynamicStrideOrOffset);
+                             ShapedType::kDynamic);
   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
-                             ShapedType::kDynamicSize);
+                             ShapedType::kDynamic);
   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
-                             ShapedType::kDynamicStrideOrOffset);
+                             ShapedType::kDynamic);
   auto sourceRankedTensorType = source.getType().cast<RankedTensorType>();
   // Structuring implementation this way avoids duplication between builders.
   if (!resultType) {
@@ -1846,13 +1846,13 @@ public:
 
     // Check if there are any dynamic parts, which are not supported.
     auto offsets = extractFromI64ArrayAttr(op.getStaticOffsets());
-    if (llvm::is_contained(offsets, ShapedType::kDynamicStrideOrOffset))
+    if (llvm::is_contained(offsets, ShapedType::kDynamic))
       return failure();
     auto sizes = extractFromI64ArrayAttr(op.getStaticSizes());
-    if (llvm::is_contained(sizes, ShapedType::kDynamicSize))
+    if (llvm::is_contained(sizes, ShapedType::kDynamic))
       return failure();
     auto strides = extractFromI64ArrayAttr(op.getStaticStrides());
-    if (llvm::is_contained(strides, ShapedType::kDynamicStrideOrOffset))
+    if (llvm::is_contained(strides, ShapedType::kDynamic))
       return failure();
 
     // Compute the stride for each dimension.
@@ -2014,11 +2014,11 @@ void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
-                             ShapedType::kDynamicStrideOrOffset);
+                             ShapedType::kDynamic);
   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
-                             ShapedType::kDynamicSize);
+                             ShapedType::kDynamic);
   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
-                             ShapedType::kDynamicStrideOrOffset);
+                             ShapedType::kDynamic);
   build(b, result, dest.getType(), source, dest, dynamicOffsets, dynamicSizes,
         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
@@ -2168,9 +2168,9 @@ public:
     SmallVector<OpFoldResult> mixedOffsets(insertSliceOp.getMixedOffsets());
     SmallVector<OpFoldResult> mixedSizes(insertSliceOp.getMixedSizes());
     SmallVector<OpFoldResult> mixedStrides(insertSliceOp.getMixedStrides());
-    canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset);
+    canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamic);
     canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic);
-    canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset);
+    canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamic);
 
     // Create the new op in canonical form.
     auto sourceType = ExtractSliceOp::inferCanonicalRankReducedResultType(
@@ -2430,14 +2430,14 @@ RankedTensorType PadOp::inferResultType(RankedTensorType sourceType,
   SmallVector<int64_t, 4> inferredShape;
   for (auto i : llvm::seq<unsigned>(0, rank)) {
     if (sourceType.isDynamicDim(i) ||
-        staticLow[i] == ShapedType::kDynamicSize ||
-        staticHigh[i] == ShapedType::kDynamicSize) {
-      inferredShape.push_back(resultShape.empty() ? ShapedType::kDynamicSize
+        staticLow[i] == ShapedType::kDynamic ||
+        staticHigh[i] == ShapedType::kDynamic) {
+      inferredShape.push_back(resultShape.empty() ? ShapedType::kDynamic
                                                   : resultShape[i]);
     } else {
       int64_t size = sourceType.getDimSize(i) + staticLow[i] + staticHigh[i];
       assert((resultShape.empty() || size == resultShape[i] ||
-              resultShape[i] == ShapedType::kDynamicSize) &&
+              resultShape[i] == ShapedType::kDynamic) &&
              "mismatch between inferred shape and result shape");
       inferredShape.push_back(size);
     }
@@ -2462,7 +2462,7 @@ void PadOp::build(OpBuilder &b, OperationState &result, Value source,
                   ArrayRef<NamedAttribute> attrs) {
   auto sourceType = source.getType().cast<RankedTensorType>();
   unsigned rank = sourceType.getRank();
-  SmallVector<int64_t, 4> staticVector(rank, ShapedType::kDynamicSize);
+  SmallVector<int64_t, 4> staticVector(rank, ShapedType::kDynamic);
   build(b, result, source, staticVector, staticVector, low, high, nofold,
         attrs);
 }
@@ -2479,9 +2479,9 @@ void PadOp::build(OpBuilder &b, OperationState &result, Type resultType,
   // dynamic (ie not a constant), dynamicLow and dynamicHigh will grow with 1
   // value as well.
   dispatchIndexOpFoldResults(low, dynamicLow, staticLow,
-                             ShapedType::kDynamicSize);
+                             ShapedType::kDynamic);
   dispatchIndexOpFoldResults(high, dynamicHigh, staticHigh,
-                             ShapedType::kDynamicSize);
+                             ShapedType::kDynamic);
   if (!resultType) {
     resultType = PadOp::inferResultType(sourceType, staticLow, staticHigh);
   }
@@ -2830,11 +2830,11 @@ void ParallelInsertSliceOp::build(OpBuilder &b, OperationState &result,
   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
-                             ShapedType::kDynamicStrideOrOffset);
+                             ShapedType::kDynamic);
   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
-                             ShapedType::kDynamicSize);
+                             ShapedType::kDynamic);
   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
-                             ShapedType::kDynamicStrideOrOffset);
+                             ShapedType::kDynamic);
   build(b, result, {}, source, dest, dynamicOffsets, dynamicSizes,
         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
index 443a06d..49d31d2 100644 (file)
@@ -113,7 +113,7 @@ Operation *tensor::bubbleUpPadSlice(OpBuilder &b, tensor::PadOp padOp,
     if (auto constInt = getConstantIntValue(val)) {
       staticIndices.push_back(*constInt);
     } else {
-      staticIndices.push_back(ShapedType::kDynamicSize);
+      staticIndices.push_back(ShapedType::kDynamic);
       dynIndices.push_back(val);
     }
   };
@@ -216,7 +216,7 @@ Operation *tensor::bubbleUpPadSlice(OpBuilder &b, tensor::PadOp padOp,
   // The shape of the result can be obtained from the sizes passed in.
   SmallVector<Value> dynDims;
   SmallVector<int64_t> shape;
-  dispatchIndexOpFoldResults(sizes, dynDims, shape, ShapedType::kDynamicSize);
+  dispatchIndexOpFoldResults(sizes, dynDims, shape, ShapedType::kDynamic);
   RankedTensorType resultType =
       RankedTensorType::get(shape, padOp.getResultType().getElementType());
 
index 5f6d180..67f1ce1 100644 (file)
@@ -44,7 +44,7 @@ SmallVector<Value> mlir::tensor::createDynamicDimValues(OpBuilder &b,
   auto tensorTy = rankedTensor.getType().cast<RankedTensorType>();
   SmallVector<Value> dynamicDims;
   for (const auto &en : llvm::enumerate(tensorTy.getShape())) {
-    if (en.value() == ShapedType::kDynamicSize)
+    if (en.value() == ShapedType::kDynamic)
       dynamicDims.push_back(
           b.create<tensor::DimOp>(loc, rankedTensor, en.index()));
   }
index e33d2bc..d5ae119 100644 (file)
@@ -413,13 +413,13 @@ LogicalResult tosa::ConcatOp::inferReturnTypeComponents(
 
     // Copy the Operand's rank.
     if (!hasRankedInput)
-      outputShape.resize(operandShape.getRank(), ShapedType::kDynamicSize);
+      outputShape.resize(operandShape.getRank(), ShapedType::kDynamic);
 
     // Copy shapes until the dim is non-dynamic.
     for (int i = 0, s = operandShape.getRank(); i < s; i++) {
       if (i == axis || operandShape.isDynamicDim(i))
         continue;
-      if (outputShape[i] == ShapedType::kDynamicSize)
+      if (outputShape[i] == ShapedType::kDynamic)
         outputShape[i] = operandShape.getDimSize(i);
       if (outputShape[i] != operandShape.getDimSize(i))
         return failure();
@@ -441,7 +441,7 @@ LogicalResult tosa::ConcatOp::inferReturnTypeComponents(
     // We need to know the length of the concatenation axis of all inputs to
     // determine the dimension size of the output shape.
     if (!operandShape.hasRank() || operandShape.isDynamicDim(axis)) {
-      concatDimSize = ShapedType::kDynamicSize;
+      concatDimSize = ShapedType::kDynamic;
       break;
     }
 
@@ -485,7 +485,7 @@ LogicalResult tosa::FullyConnectedOp::inferReturnTypeComponents(
 
   // All shapes are dynamic.
   SmallVector<int64_t> outShape;
-  outShape.resize(2, ShapedType::kDynamicSize);
+  outShape.resize(2, ShapedType::kDynamic);
 
   if (inputShape.hasRank()) {
     outShape[0] = inputShape.getDimSize(0);
@@ -496,7 +496,7 @@ LogicalResult tosa::FullyConnectedOp::inferReturnTypeComponents(
   }
 
   if (biasShape.hasRank()) {
-    outShape[1] = outShape[1] == ShapedType::kDynamicSize
+    outShape[1] = outShape[1] == ShapedType::kDynamic
                       ? biasShape.getDimSize(0)
                       : outShape[1];
   }
@@ -516,7 +516,7 @@ LogicalResult tosa::MatMulOp::inferReturnTypeComponents(
 
   // All shapes are dynamic.
   SmallVector<int64_t> outShape;
-  outShape.resize(3, ShapedType::kDynamicSize);
+  outShape.resize(3, ShapedType::kDynamic);
 
   if (lhsShape.hasRank()) {
     outShape[0] = lhsShape.getDimSize(0);
@@ -524,7 +524,7 @@ LogicalResult tosa::MatMulOp::inferReturnTypeComponents(
   }
 
   if (rhsShape.hasRank()) {
-    outShape[0] = outShape[0] == ShapedType::kDynamicSize
+    outShape[0] = outShape[0] == ShapedType::kDynamic
                       ? rhsShape.getDimSize(0)
                       : outShape[0];
     outShape[2] = rhsShape.getDimSize(2);
@@ -557,7 +557,7 @@ LogicalResult tosa::PadOp::inferReturnTypeComponents(
       return success();
     }
 
-    outputShape.resize(paddingShape.getDimSize(0), ShapedType::kDynamicSize);
+    outputShape.resize(paddingShape.getDimSize(0), ShapedType::kDynamic);
     inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
     return success();
   }
@@ -565,7 +565,7 @@ LogicalResult tosa::PadOp::inferReturnTypeComponents(
   DenseIntElementsAttr paddings;
   // If the paddings value is not a constant, all dimensions must be dynamic.
   if (!matchPattern(operands[1], m_Constant(&paddings))) {
-    outputShape.resize(inputShape.getRank(), ShapedType::kDynamicSize);
+    outputShape.resize(inputShape.getRank(), ShapedType::kDynamic);
     inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
     return success();
   }
@@ -578,7 +578,7 @@ LogicalResult tosa::PadOp::inferReturnTypeComponents(
   outputShape.reserve(inputShape.getRank());
   for (int i = 0, s = inputShape.getRank(); i < s; i++) {
     if (inputShape.isDynamicDim(i)) {
-      outputShape.push_back(ShapedType::kDynamicSize);
+      outputShape.push_back(ShapedType::kDynamic);
       continue;
     }
 
@@ -592,7 +592,7 @@ LogicalResult tosa::PadOp::inferReturnTypeComponents(
 
 static SmallVector<int64_t> convertToMlirShape(ArrayRef<int64_t> shape) {
   return to_vector(llvm::map_range(shape, [](int64_t dim) {
-    return dim == -1 ? ShapedType::kDynamicSize : dim;
+    return dim == -1 ? ShapedType::kDynamic : dim;
   }));
 }
 
@@ -637,7 +637,7 @@ LogicalResult tosa::TileOp::inferReturnTypeComponents(
   ShapeAdaptor inputShape = operands.getShape(0);
   SmallVector<int64_t> outputShape;
   if (!inputShape.hasRank()) {
-    outputShape.resize(multiples.size(), ShapedType::kDynamicSize);
+    outputShape.resize(multiples.size(), ShapedType::kDynamic);
     inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
     return success();
   }
@@ -653,7 +653,7 @@ LogicalResult tosa::TileOp::inferReturnTypeComponents(
   outputShape.reserve(multiples.size());
   for (int i = 0, s = inputShape.getRank(); i < s; i++) {
     int64_t dim = inputShape.getDimSize(i);
-    if (dim != ShapedType::kDynamicSize)
+    if (dim != ShapedType::kDynamic)
       dim *= multipleValues[i];
     outputShape.push_back(dim);
   }
@@ -662,7 +662,6 @@ LogicalResult tosa::TileOp::inferReturnTypeComponents(
   return success();
 }
 
-
 LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
     MLIRContext *context, ::llvm::Optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
@@ -728,7 +727,7 @@ LogicalResult tosa::TransposeOp::inferReturnTypeComponents(
   // can determine the output rank.
   SmallVector<int64_t> outputShape;
   if (!inputShape.hasRank()) {
-    outputShape.resize(permsShape.getDimSize(0), ShapedType::kDynamicSize);
+    outputShape.resize(permsShape.getDimSize(0), ShapedType::kDynamic);
     inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
     return success();
   }
@@ -756,7 +755,7 @@ LogicalResult tosa::TransposeOp::inferReturnTypeComponents(
     return success();
   }
 
-  outputShape.resize(inputShape.getRank(), ShapedType::kDynamicSize);
+  outputShape.resize(inputShape.getRank(), ShapedType::kDynamic);
   // If the permutations are a constant we can directly determine the output
   // shape.
   if (ShapeAdaptor permShape = operands.getValueAsShape(1)) {
@@ -775,7 +774,7 @@ LogicalResult tosa::GatherOp::inferReturnTypeComponents(
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   llvm::SmallVector<int64_t> outputShape;
-  outputShape.resize(3, ShapedType::kDynamicSize);
+  outputShape.resize(3, ShapedType::kDynamic);
 
   ShapeAdaptor valuesShape = operands.getShape(0);
   if (valuesShape.hasRank()) {
@@ -785,9 +784,9 @@ LogicalResult tosa::GatherOp::inferReturnTypeComponents(
 
   ShapeAdaptor indicesShape = operands.getShape(1);
   if (indicesShape.hasRank()) {
-    if (outputShape[0] == ShapedType::kDynamicSize)
+    if (outputShape[0] == ShapedType::kDynamic)
       outputShape[0] = indicesShape.getDimSize(0);
-    if (outputShape[1] == ShapedType::kDynamicSize)
+    if (outputShape[1] == ShapedType::kDynamic)
       outputShape[1] = indicesShape.getDimSize(1);
   }
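
The TOSA shape-inference hunks around here all follow the same merge rule: a dimension that is still dynamic adopts an extent learned from another operand, while already-known extents are kept. A hedged, self-contained sketch of that rule (not the actual ShapeAdaptor API):

    #include <cassert>
    #include <cstdint>
    #include <limits>
    #include <vector>

    // Assumed to mirror ShapedType::kDynamic.
    constexpr int64_t kDynamic = std::numeric_limits<int64_t>::min();
    constexpr bool isDynamic(int64_t d) { return d == kDynamic; }

    // Refine an output shape in place: only dimensions that are still dynamic
    // take the candidate's extent.
    void refineShape(std::vector<int64_t> &out, const std::vector<int64_t> &cand) {
      for (size_t i = 0, e = out.size(); i < e && i < cand.size(); ++i)
        if (isDynamic(out[i]))
          out[i] = cand[i];
    }

    int main() {
      std::vector<int64_t> out(3, kDynamic);       // start fully dynamic
      refineShape(out, {kDynamic, 16, kDynamic});  // e.g. from an indices operand
      refineShape(out, {8, kDynamic, 32});         // e.g. from a values operand
      assert(out[0] == 8 && out[1] == 16 && out[2] == 32);
      return 0;
    }
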
 
@@ -801,7 +800,7 @@ LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   ResizeOpAdaptor adaptor(operands, attributes);
   llvm::SmallVector<int64_t, 4> outputShape;
-  outputShape.resize(4, ShapedType::kDynamicSize);
+  outputShape.resize(4, ShapedType::kDynamic);
 
   ShapeAdaptor inputShape = operands.getShape(adaptor.getInput());
   if (!inputShape.hasRank())
@@ -812,8 +811,8 @@ LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
   int64_t inputHeight = inputShape.getDimSize(1);
   int64_t inputWidth = inputShape.getDimSize(2);
 
-  if ((inputHeight == ShapedType::kDynamicSize) ||
-      (inputWidth == ShapedType::kDynamicSize))
+  if ((inputHeight == ShapedType::kDynamic) ||
+      (inputWidth == ShapedType::kDynamic))
     return failure();
 
   llvm::SmallVector<int64_t> scaleInt;
@@ -843,7 +842,7 @@ LogicalResult tosa::ScatterOp::inferReturnTypeComponents(
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   llvm::SmallVector<int64_t> outputShape;
-  outputShape.resize(3, ShapedType::kDynamicSize);
+  outputShape.resize(3, ShapedType::kDynamic);
 
   ShapeAdaptor valuesInShape = operands.getShape(0);
   if (valuesInShape.hasRank()) {
@@ -854,15 +853,15 @@ LogicalResult tosa::ScatterOp::inferReturnTypeComponents(
 
   ShapeAdaptor indicesShape = operands.getShape(1);
   if (indicesShape.hasRank()) {
-    if (outputShape[0] == ShapedType::kDynamicSize)
+    if (outputShape[0] == ShapedType::kDynamic)
       outputShape[0] = indicesShape.getDimSize(0);
   }
 
   ShapeAdaptor inputShape = operands.getShape(2);
   if (inputShape.hasRank()) {
-    if (outputShape[0] == ShapedType::kDynamicSize)
+    if (outputShape[0] == ShapedType::kDynamic)
       outputShape[0] = inputShape.getDimSize(0);
-    if (outputShape[2] == ShapedType::kDynamicSize)
+    if (outputShape[2] == ShapedType::kDynamic)
       outputShape[2] = inputShape.getDimSize(2);
   }
 
@@ -970,7 +969,7 @@ static LogicalResult poolingInferReturnTypes(
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   ShapeAdaptor inputShape = operands.getShape(0);
   llvm::SmallVector<int64_t> outputShape;
-  outputShape.resize(4, ShapedType::kDynamicSize);
+  outputShape.resize(4, ShapedType::kDynamic);
 
   // We only know the rank if the input type is unranked.
   if (!inputShape) {
@@ -1011,13 +1010,13 @@ LogicalResult Conv2DOp::inferReturnTypeComponents(
     MLIRContext *context, ::llvm::Optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
-  llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamicSize);
+  llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamic);
   Conv2DOp::Adaptor adaptor(operands.getValues(), attributes);
 
-  int64_t inputWidth = ShapedType::kDynamicSize;
-  int64_t inputHeight = ShapedType::kDynamicSize;
-  int64_t weightWidth = ShapedType::kDynamicSize;
-  int64_t weightHeight = ShapedType::kDynamicSize;
+  int64_t inputWidth = ShapedType::kDynamic;
+  int64_t inputHeight = ShapedType::kDynamic;
+  int64_t weightWidth = ShapedType::kDynamic;
+  int64_t weightHeight = ShapedType::kDynamic;
 
   // Input shape describes input width/height and batch.
 
@@ -1078,16 +1077,16 @@ LogicalResult Conv3DOp::inferReturnTypeComponents(
     MLIRContext *context, ::llvm::Optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
-  llvm::SmallVector<int64_t> outputShape(5, ShapedType::kDynamicSize);
+  llvm::SmallVector<int64_t> outputShape(5, ShapedType::kDynamic);
   Conv3DOp::Adaptor adaptor(operands.getValues(), attributes);
 
-  int64_t inputWidth = ShapedType::kDynamicSize;
-  int64_t inputHeight = ShapedType::kDynamicSize;
-  int64_t inputDepth = ShapedType::kDynamicSize;
+  int64_t inputWidth = ShapedType::kDynamic;
+  int64_t inputHeight = ShapedType::kDynamic;
+  int64_t inputDepth = ShapedType::kDynamic;
 
-  int64_t weightWidth = ShapedType::kDynamicSize;
-  int64_t weightHeight = ShapedType::kDynamicSize;
-  int64_t weightDepth = ShapedType::kDynamicSize;
+  int64_t weightWidth = ShapedType::kDynamic;
+  int64_t weightHeight = ShapedType::kDynamic;
+  int64_t weightDepth = ShapedType::kDynamic;
 
   // Input shape describes input width/height and batch.
   ShapeAdaptor inputShape = operands.getShape(adaptor.getInput());
@@ -1169,16 +1168,16 @@ LogicalResult DepthwiseConv2DOp::inferReturnTypeComponents(
     MLIRContext *context, ::llvm::Optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
-  llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamicSize);
+  llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamic);
   DepthwiseConv2DOp::Adaptor adaptor(operands.getValues(), attributes);
 
-  int64_t inputWidth = ShapedType::kDynamicSize;
-  int64_t inputHeight = ShapedType::kDynamicSize;
-  int64_t inputChannels = ShapedType::kDynamicSize;
+  int64_t inputWidth = ShapedType::kDynamic;
+  int64_t inputHeight = ShapedType::kDynamic;
+  int64_t inputChannels = ShapedType::kDynamic;
 
-  int64_t weightWidth = ShapedType::kDynamicSize;
-  int64_t weightHeight = ShapedType::kDynamicSize;
-  int64_t depthChannels = ShapedType::kDynamicSize;
+  int64_t weightWidth = ShapedType::kDynamic;
+  int64_t weightHeight = ShapedType::kDynamic;
+  int64_t depthChannels = ShapedType::kDynamic;
 
   // Input shape describes input width/height and batch.
   ShapeAdaptor inputShape = operands.getShape(adaptor.getInput());
@@ -1254,10 +1253,10 @@ LogicalResult TransposeConv2DOp::inferReturnTypeComponents(
   getI64Values(adaptor.getOutShape(), outputShape);
   outputShape = convertToMlirShape(outputShape);
 
-  int64_t inputWidth = ShapedType::kDynamicSize;
-  int64_t inputHeight = ShapedType::kDynamicSize;
-  int64_t weightWidth = ShapedType::kDynamicSize;
-  int64_t weightHeight = ShapedType::kDynamicSize;
+  int64_t inputWidth = ShapedType::kDynamic;
+  int64_t inputHeight = ShapedType::kDynamic;
+  int64_t weightWidth = ShapedType::kDynamic;
+  int64_t weightHeight = ShapedType::kDynamic;
 
   // Input shape describes input width/height and batch.
   ShapeAdaptor inputShape = operands.getShape(adaptor.getInput());
index 65ac820..d705624 100644 (file)
@@ -58,7 +58,7 @@ struct Conv2DIsFullyConnected : public OpRewritePattern<tosa::Conv2DOp> {
 
     // Reshape input to [N,IH,IW,IC] -> [N * IH * IW, IC].
     ArrayRef<int64_t> inputShape = inputType.getShape();
-    int64_t combined = ShapedType::kDynamicSize;
+    int64_t combined = ShapedType::kDynamic;
     if (numDynamic == 0)
       combined = inputShape[0] * inputShape[1] * inputShape[2];
     llvm::SmallVector<int64_t, 2> revisedInputShape{combined, inputShape[3]};
index 1b3b373..5c18fcf 100644 (file)
@@ -94,7 +94,7 @@ bool OpTrait::util::getBroadcastedShape(ArrayRef<int64_t> shape1,
       } else if (*i2 == 1) {
         *iR = *i1;
       } else {
-        *iR = ShapedType::kDynamicSize;
+        *iR = ShapedType::kDynamic;
       }
     } else {
       if (*i1 == *i2 || *i2 == 1) {
index e31d069..b65c027 100644 (file)
@@ -46,7 +46,7 @@ mlir::getReassociationIndicesForCollapse(ArrayRef<int64_t> sourceShape,
       break;
 
     int64_t currTargetShape = targetShape[targetDim];
-    while (sourceShape[sourceDim] != ShapedType::kDynamicSize &&
+    while (sourceShape[sourceDim] != ShapedType::kDynamic &&
            prodOfCollapsedDims * sourceShape[sourceDim] < currTargetShape &&
            sourceDim < sourceShape.size()) {
       prodOfCollapsedDims *= sourceShape[sourceDim];
@@ -56,15 +56,15 @@ mlir::getReassociationIndicesForCollapse(ArrayRef<int64_t> sourceShape,
     // If the current expanded dimension is dynamic, then the collapsed
     // dimensions should also be dynamic and product of all previous unprocessed
     // dimensions of the expanded shape should be 1.
-    if (sourceShape[sourceDim] == ShapedType::kDynamicSize &&
-        (currTargetShape != ShapedType::kDynamicSize ||
+    if (sourceShape[sourceDim] == ShapedType::kDynamic &&
+        (currTargetShape != ShapedType::kDynamic ||
          prodOfCollapsedDims != 1))
       return llvm::None;
 
     // If the collapsed dim is dynamic, the current expanded dim should also
     // be dynamic.
-    if (currTargetShape == ShapedType::kDynamicSize &&
-        sourceShape[sourceDim] != ShapedType::kDynamicSize)
+    if (currTargetShape == ShapedType::kDynamic &&
+        sourceShape[sourceDim] != ShapedType::kDynamic)
       return llvm::None;
 
     // For static shapes, if the product of dimensions of the expanded shape
@@ -83,7 +83,7 @@ mlir::getReassociationIndicesForCollapse(ArrayRef<int64_t> sourceShape,
   // Process any remaining entries in the source shape. They all need to be
   // 1 or dynamic.
   for (; sourceDim < sourceShape.size(); sourceDim++) {
-    if (sourceShape[sourceDim] != ShapedType::kDynamicSize &&
+    if (sourceShape[sourceDim] != ShapedType::kDynamic &&
         sourceShape[sourceDim] != 1)
       return llvm::None;
     // The map is empty when the target type is a scalar.
index 52f2b83..323a173 100644 (file)
@@ -170,13 +170,13 @@ static MemRefType getCastCompatibleMemRefType(MemRefType aT, MemRefType bT) {
       resStrides(bT.getRank(), 0);
   for (int64_t idx = 0, e = aT.getRank(); idx < e; ++idx) {
     resShape[idx] =
-        (aShape[idx] == bShape[idx]) ? aShape[idx] : ShapedType::kDynamicSize;
+        (aShape[idx] == bShape[idx]) ? aShape[idx] : ShapedType::kDynamic;
     resStrides[idx] = (aStrides[idx] == bStrides[idx])
                           ? aStrides[idx]
-                          : ShapedType::kDynamicStrideOrOffset;
+                          : ShapedType::kDynamic;
   }
   resOffset =
-      (aOffset == bOffset) ? aOffset : ShapedType::kDynamicStrideOrOffset;
+      (aOffset == bOffset) ? aOffset : ShapedType::kDynamic;
   return MemRefType::get(
       resShape, aT.getElementType(),
       StridedLayoutAttr::get(aT.getContext(), resOffset, resStrides));
index 8a3c162..a9671d6 100644 (file)
@@ -207,7 +207,7 @@ DictionaryAttr DictionaryAttr::getEmptyUnchecked(MLIRContext *context) {
 /// Prints a strided layout attribute.
 void StridedLayoutAttr::print(llvm::raw_ostream &os) const {
   auto printIntOrQuestion = [&](int64_t value) {
-    if (value == ShapedType::kDynamicStrideOrOffset)
+    if (ShapedType::isDynamic(value))
       os << "?";
     else
       os << value;
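
The printer above renders dynamic strides and offsets as `?`, which is where layouts such as `memref<?x?xf32, strided<[?, 1], offset: ?>>` in the earlier comments come from. A self-contained sketch of the same formatting convention (the helper below is illustrative, not the StridedLayoutAttr implementation):

    #include <cstdint>
    #include <iostream>
    #include <limits>
    #include <vector>

    // Assumed to mirror ShapedType::kDynamic.
    constexpr int64_t kDynamic = std::numeric_limits<int64_t>::min();

    // Print a strided layout, rendering dynamic entries as '?'.
    void printStrided(std::ostream &os, const std::vector<int64_t> &strides,
                      int64_t offset) {
      auto intOrQuestion = [&](int64_t v) {
        if (v == kDynamic)
          os << '?';
        else
          os << v;
      };
      os << "strided<[";
      for (size_t i = 0; i < strides.size(); ++i) {
        if (i)
          os << ", ";
        intOrQuestion(strides[i]);
      }
      os << "], offset: ";
      intOrQuestion(offset);
      os << ">";
    }

    int main() {
      printStrided(std::cout, {kDynamic, 1}, kDynamic);
      std::cout << "\n"; // prints: strided<[?, 1], offset: ?>
      return 0;
    }
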
@@ -1770,7 +1770,7 @@ AffineMap mlir::makeStridedLinearLayoutMap(ArrayRef<int64_t> strides,
 
   // AffineExpr for offset.
   // Static case.
-  if (offset != MemRefType::getDynamicStrideOrOffset()) {
+  if (!ShapedType::isDynamic(offset)) {
     auto cst = getAffineConstantExpr(offset, context);
     expr = cst;
   } else {
@@ -1787,7 +1787,7 @@ AffineMap mlir::makeStridedLinearLayoutMap(ArrayRef<int64_t> strides,
     auto d = getAffineDimExpr(dim, context);
     AffineExpr mult;
     // Static case.
-    if (stride != MemRefType::getDynamicStrideOrOffset())
+    if (!ShapedType::isDynamic(stride))
       mult = getAffineConstantExpr(stride, context);
     else
       // Dynamic case, new symbol for each new stride.
index aaa2233..bd7f617 100644 (file)
@@ -23,8 +23,7 @@ using namespace mlir::detail;
 // ShapedType
 //===----------------------------------------------------------------------===//
 
-constexpr int64_t ShapedType::kDynamicSize;
-constexpr int64_t ShapedType::kDynamicStrideOrOffset;
+constexpr int64_t ShapedType::kDynamic;
 
 int64_t ShapedType::getNumElements(ArrayRef<int64_t> shape) {
   int64_t num = 1;
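With the two sentinels merged, one constant and one predicate now cover sizes, strides, and offsets alike. A minimal sketch:

    #include <cassert>

    #include "mlir/IR/BuiltinTypes.h"

    void sentinelSketch() {
      int64_t size = mlir::ShapedType::kDynamic;
      int64_t stride = mlir::ShapedType::kDynamic;
      int64_t offset = 16;
      assert(mlir::ShapedType::isDynamic(size));
      assert(mlir::ShapedType::isDynamic(stride));
      assert(!mlir::ShapedType::isDynamic(offset));
    }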
index f4d64c9..85592db 100644 (file)
@@ -608,7 +608,7 @@ LogicalResult MemRefType::verify(function_ref<InFlightDiagnostic()> emitError,
   if (!BaseMemRefType::isValidElementType(elementType))
     return emitError() << "invalid memref element type";
 
-  // Negative sizes are not allowed except for `kDynamicSize`.
+  // Negative sizes are not allowed except for `kDynamic`.
   for (int64_t s : shape)
     if (s < 0 && !ShapedType::isDynamic(s))
       return emitError() << "invalid memref size";
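The rule the verifier applies above can be restated as: the only legal negative extent is the kDynamic sentinel. A minimal sketch (isValidMemRefExtent is a hypothetical helper, not part of the patch):

    #include "mlir/IR/BuiltinTypes.h"

    // Mirrors the check above: any negative size other than kDynamic is invalid.
    static bool isValidMemRefExtent(int64_t s) {
      return s >= 0 || mlir::ShapedType::isDynamic(s);
    }

    // isValidMemRefExtent(mlir::ShapedType::kDynamic) == true
    // isValidMemRefExtent(-3) == false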
@@ -705,7 +705,7 @@ static LogicalResult extractStrides(AffineExpr e,
 }
 
 /// A stride specification is a list of integer values that are either static
-/// or dynamic (encoded with ShapedType::kDynamicStrideOrOffset). Strides encode
+/// or dynamic (encoded with ShapedType::kDynamic). Strides encode
 /// the distance in the number of elements between successive entries along a
 /// particular dimension.
 ///
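For example, a contiguous memref<42x16xf32> has the static stride list [16, 1] and offset 0, while memref<42x16xf32, strided<[?, 1], offset: ?>> keeps the inner stride static and encodes the outer stride and the offset as ShapedType::kDynamic.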
@@ -794,12 +794,12 @@ LogicalResult mlir::getStridesAndOffset(MemRefType t,
   if (auto cst = offsetExpr.dyn_cast<AffineConstantExpr>())
     offset = cst.getValue();
   else
-    offset = ShapedType::kDynamicStrideOrOffset;
+    offset = ShapedType::kDynamic;
   for (auto e : strideExprs) {
     if (auto c = e.dyn_cast<AffineConstantExpr>())
       strides.push_back(c.getValue());
     else
-      strides.push_back(ShapedType::kDynamicStrideOrOffset);
+      strides.push_back(ShapedType::kDynamic);
   }
   return success();
 }
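A usage sketch of the updated query (the surrounding function name is made up; getStridesAndOffset and MemRefType are declared in mlir/IR/BuiltinTypes.h):

    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/Support/LogicalResult.h"
    #include "llvm/ADT/SmallVector.h"

    void strideQuerySketch(mlir::MemRefType t) {
      llvm::SmallVector<int64_t, 4> strides;
      int64_t offset;
      if (mlir::succeeded(mlir::getStridesAndOffset(t, strides, offset))) {
        // Dynamic strides and offsets now come back as ShapedType::kDynamic
        // and are detected with the single ShapedType::isDynamic predicate.
        bool dynOffset = mlir::ShapedType::isDynamic(offset);
        (void)dynOffset;
      }
    }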
index 89ebd81..f818df1 100644 (file)
@@ -55,17 +55,17 @@ mlir::detail::verifyOffsetSizeAndStrideOp(OffsetSizeAndStrideOpInterface op) {
            << op.getMixedSizes().size() << " vs " << op.getMixedStrides().size()
            << ") so the rank of the result type is well-formed.";
 
-  if (failed(verifyListOfOperandsOrIntegers(
-          op, "offset", maxRanks[0], op.static_offsets(), op.offsets(),
-          ShapedType::isDynamicStrideOrOffset)))
+  if (failed(verifyListOfOperandsOrIntegers(op, "offset", maxRanks[0],
+                                            op.static_offsets(), op.offsets(),
+                                            ShapedType::isDynamic)))
     return failure();
   if (failed(verifyListOfOperandsOrIntegers(op, "size", maxRanks[1],
                                             op.static_sizes(), op.sizes(),
                                             ShapedType::isDynamic)))
     return failure();
-  if (failed(verifyListOfOperandsOrIntegers(
-          op, "stride", maxRanks[2], op.static_strides(), op.strides(),
-          ShapedType::isDynamicStrideOrOffset)))
+  if (failed(verifyListOfOperandsOrIntegers(op, "stride", maxRanks[2],
+                                            op.static_strides(), op.strides(),
+                                            ShapedType::isDynamic)))
     return failure();
   return success();
 }
@@ -166,13 +166,12 @@ mlir::getMixedValues(ArrayAttr staticValues, ValueRange dynamicValues,
 SmallVector<OpFoldResult, 4>
 mlir::getMixedStridesOrOffsets(ArrayAttr staticValues,
                                ValueRange dynamicValues) {
-  return getMixedValues(staticValues, dynamicValues,
-                        ShapedType::kDynamicStrideOrOffset);
+  return getMixedValues(staticValues, dynamicValues, ShapedType::kDynamic);
 }
 
 SmallVector<OpFoldResult, 4> mlir::getMixedSizes(ArrayAttr staticValues,
                                                  ValueRange dynamicValues) {
-  return getMixedValues(staticValues, dynamicValues, ShapedType::kDynamicSize);
+  return getMixedValues(staticValues, dynamicValues, ShapedType::kDynamic);
 }
 
 std::pair<ArrayAttr, SmallVector<Value>>
@@ -194,12 +193,11 @@ mlir::decomposeMixedValues(Builder &b,
 
 std::pair<ArrayAttr, SmallVector<Value>> mlir::decomposeMixedStridesOrOffsets(
     OpBuilder &b, const SmallVectorImpl<OpFoldResult> &mixedValues) {
-  return decomposeMixedValues(b, mixedValues,
-                              ShapedType::kDynamicStrideOrOffset);
+  return decomposeMixedValues(b, mixedValues, ShapedType::kDynamic);
 }
 
 std::pair<ArrayAttr, SmallVector<Value>>
 mlir::decomposeMixedSizes(OpBuilder &b,
                           const SmallVectorImpl<OpFoldResult> &mixedValues) {
-  return decomposeMixedValues(b, mixedValues, ShapedType::kDynamicSize);
+  return decomposeMixedValues(b, mixedValues, ShapedType::kDynamic);
 }
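Both wrappers now funnel into getMixedValues/decomposeMixedValues with the same sentinel. A minimal sketch of the static-only case (assuming the declarations from mlir/Interfaces/ViewLikeInterface.h; mixedValuesSketch is a made-up name):

    #include "mlir/IR/Builders.h"
    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/Interfaces/ViewLikeInterface.h"

    void mixedValuesSketch(mlir::MLIRContext *ctx) {
      mlir::Builder b(ctx);
      // Two static sizes and no dynamic SSA values: every entry is returned
      // as an Attribute-backed OpFoldResult.
      mlir::ArrayAttr staticSizes = b.getI64ArrayAttr({4, 8});
      auto mixed = mlir::getMixedValues(staticSizes, mlir::ValueRange(),
                                        mlir::ShapedType::kDynamic);
      // mixed.size() == 2, holding the attributes 4 and 8.
    }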
index 30bf50c..c9e72f8 100644 (file)
@@ -41,13 +41,13 @@ void TestMemRefStrideCalculation::runOnOperation() {
       return;
     }
     llvm::outs() << "MemRefType offset: ";
-    if (offset == MemRefType::getDynamicStrideOrOffset())
+    if (ShapedType::isDynamic(offset))
       llvm::outs() << "?";
     else
       llvm::outs() << offset;
     llvm::outs() << " strides: ";
     llvm::interleaveComma(strides, llvm::outs(), [&](int64_t v) {
-      if (v == MemRefType::getDynamicStrideOrOffset())
+      if (ShapedType::isDynamic(v))
         llvm::outs() << "?";
       else
         llvm::outs() << v;
index b560df9..93e4fba 100644 (file)
@@ -1172,7 +1172,7 @@ LogicalResult OpWithShapedTypeInferTypeInterfaceOp::inferReturnTypeComponents(
     return emitOptionalError(location, "only shaped type operands allowed");
   }
   int64_t dim =
-      sval.hasRank() ? sval.getShape().front() : ShapedType::kDynamicSize;
+      sval.hasRank() ? sval.getShape().front() : ShapedType::kDynamic;
   auto type = IntegerType::get(context, 17);
   inferredReturnShapes.push_back(ShapedTypeComponents({dim}, type));
   return success();
index f1ab02d..db8afe5 100644 (file)
@@ -47,7 +47,7 @@ TEST(BroadcastShapeTest, InterleavingOnes) {
 
 TEST(BroadcastShapeTest, InterleavingUnknowns) {
   SmallVector<int64_t, 4> result;
-  int64_t dyn = mlir::ShapedType::kDynamicSize;
+  int64_t dyn = mlir::ShapedType::kDynamic;
   ASSERT_TRUE(getBroadcastedShape({1, 2, dyn, dyn, dyn}, {dyn, dyn, dyn, 4, 1},
                                   result));
   EXPECT_THAT(result, ElementsAre(dyn, 2, dyn, 4, dyn));