static constexpr LenType singleton() { return 1; }
/// Character has a LEN value which is not a compile-time known constant.
- static constexpr LenType unknownLen() { return mlir::ShapedType::kDynamicSize; }
+ static constexpr LenType unknownLen() { return mlir::ShapedType::kDynamic; }
/// Character LEN is a runtime value.
bool hasDynamicLen() { return getLen() == unknownLen(); }
// Does the sequence have unknown shape? (`array<* x T>`)
bool hasUnknownShape() const { return getShape().empty(); }
- // The value `kDynamicSize` represents an unknown extent for a dimension
+ // The value `kDynamic` represents an unknown extent for a dimension
static constexpr Extent getUnknownExtent() {
- return mlir::ShapedType::kDynamicSize;
+ return mlir::ShapedType::kDynamic;
}
}];
}
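A minimal usage sketch of the sentinel above (assuming the fir::SequenceType
API shown here; the helper name is hypothetical):

    // Hypothetical helper: true if any extent of the array type is only
    // known at runtime, i.e. equals fir::SequenceType::getUnknownExtent().
    static bool hasDynamicExtent(fir::SequenceType seqTy) {
      return llvm::any_of(seqTy.getShape(), [](int64_t extent) {
        return extent == fir::SequenceType::getUnknownExtent();
      });
    }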
affineMap, indexArgs);
auto arrayElementType = coordinateArrayElement(acoOp);
auto newType =
- mlir::MemRefType::get({mlir::ShapedType::kDynamicSize}, arrayElementType);
+ mlir::MemRefType::get({mlir::ShapedType::kDynamic}, arrayElementType);
auto arrayConvert = rewriter.create<fir::ConvertOp>(acoOp.getLoc(), newType,
acoOp.getMemref());
return std::make_pair(affineApply, arrayConvert);
SmallVector<OpFoldResult> mixedOffsets(op.getMixedOffsets());
SmallVector<OpFoldResult> mixedSizes(op.getMixedSizes());
SmallVector<OpFoldResult> mixedStrides(op.getMixedStrides());
- canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset);
+ canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamic);
canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic);
- canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset);
+ canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamic);
// Create the new op in canonical form.
ResultTypeFunc resultTypeFunc;
/*desc=*/[{
Like `getShape`, but only returns statically-known information, without
generating any new IR. For each shape dimension, returns >=0 if that
- dimension is statically known, or ShapeType::kDynamicSize otherwise.
+ dimension is statically known, or ShapedType::kDynamic otherwise.
}],
/*retTy=*/"SmallVector<int64_t>",
/*methodName=*/"getStaticShape",
/*desc=*/[{
Returns the statically-known loop ranges. Composes
`getShapesToLoopsMap()` with the result of `getStaticShape`.
- Returns ShapeType::kDynamicSize for non-statically-known loop ranges.
+ Returns ShapedType::kDynamic for non-statically-known loop ranges.
This is expected to be called by a valid Linalg op.
}],
/*retTy=*/"SmallVector<int64_t, 4>",
static split point attribute when it is known at transform IR construction
time or as the handle to an operation producing a single index-typed value
when it is computed by payload IR. In the latter case, the static split
- point must be set to `ShapedType::kDynamicSize` and the dynamic size handle
+ point must be set to `ShapedType::kDynamic` and the dynamic size handle
must point to as many value-producing operations as there are structured
operations pointed to by the target handle.
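As a sketch of this encoding (it mirrors the SplitOp parser change later in
this patch):

    // Split point computed by payload IR: the static attribute carries the
    // sentinel and the dynamic handle supplies the actual value.
    staticSplitPoint =
        parser.getBuilder().getI64IntegerAttr(ShapedType::kDynamic);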
case the tile value must be computed by the payload IR and the handle to the
operation computing it must be provided through `dynamic_sizes`. When the
sizes are not known statically, the corresponding entry in the
- `static_sizes` attribute must be set to `ShapedType::kDynamicSize`. Only
+ `static_sizes` attribute must be set to `ShapedType::kDynamic`. Only
the dynamic sizes must be provided in `dynamic_sizes`, i.e., there should
- be as many handles as `ShapedType::kDynamicSize` values in the
+ be as many handles as `ShapedType::kDynamic` values in the
`static_sizes` attribute. A static size of `0` indicates that the dimension
should not be tiled. No loop will be generated for such dimensions. If all
tile sizes are `0`, this transform is effectively a no-op.
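A sketch of how builders encode this convention, using the
`dispatchIndexOpFoldResults` helper that appears later in this patch
(`mixedSizes` is an assumed ArrayRef<OpFoldResult> of mixed tile sizes):

    SmallVector<int64_t> staticSizes;
    SmallVector<Value> dynamicSizes;
    // Attribute entries land in staticSizes; Value entries become operands
    // and leave ShapedType::kDynamic in the corresponding static slot.
    dispatchIndexOpFoldResults(mixedSizes, dynamicSizes, staticSizes,
                               ShapedType::kDynamic);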
$target oilist(
`num_threads` custom<DynamicIndexList>($num_threads,
$static_num_threads,
- "ShapedType::kDynamicSize") |
+ "ShapedType::kDynamic") |
`tile_sizes` custom<DynamicIndexList>($tile_sizes,
$static_tile_sizes,
- "ShapedType::kDynamicSize"))
+ "ShapedType::kDynamic"))
(`(` `mapping` `=` $mapping^ `)`)? attr-dict
}];
let hasVerifier = 1;
case the tile value must be computed by the payload IR and the handle to the
operation computing it must be provided through `dynamic_sizes`. When the
sizes are not known statically, the corresponding entry in the
- `static_sizes` attribute must be set to `ShapedType::kDynamicSize`. Only
+ `static_sizes` attribute must be set to `ShapedType::kDynamic`. Only
the dynamic sizes must be provided in `dynamic_sizes`, i.e., there should
- be as many handles as `ShapedType::kDynamicSize` values in the
+ be as many handles as `ShapedType::kDynamic` values in the
`static_sizes` attribute. A static size of `0` indicates that the dimension
should not be tiled. No loop will be generated for such dimensions. If all
tile sizes are `0`, this transform is effectively a no-op.
let assemblyFormat = [{
$source `to` `offset` `` `:`
custom<DynamicIndexList>($offsets, $static_offsets,
- "ShapedType::kDynamicStrideOrOffset")
+ "ShapedType::kDynamic")
`` `,` `sizes` `` `:`
custom<DynamicIndexList>($sizes, $static_sizes,
- "ShapedType::kDynamicSize")
+ "ShapedType::kDynamic")
`` `,` `strides` `` `:`
custom<DynamicIndexList>($strides, $static_strides,
- "ShapedType::kDynamicStrideOrOffset")
+ "ShapedType::kDynamic")
attr-dict `:` type($source) `to` type($result)
}];
The representation based on offsets, sizes and strides supports a
partially-static specification via attributes specified through the
`static_offsets`, `static_sizes` and `static_strides` arguments. A special
- sentinel value ShapedType::kDynamicSize and
- ShapedType::kDynamicStrideOrOffset encodes that the corresponding entry has
+ sentinel value ShapedType::kDynamic encodes that the corresponding entry has
a dynamic value.
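For illustration, a rank-2 view with dynamic offsets but static sizes and
strides would now be encoded as (hedged sketch):

    // static_offsets: both entries dynamic; values are supplied as operands.
    SmallVector<int64_t> staticOffsets(2, ShapedType::kDynamic);
    // static_sizes / static_strides: fully static; no operands needed.
    SmallVector<int64_t> staticSizes = {4, 4};
    SmallVector<int64_t> staticStrides = {8, 1};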
A subview operation may additionally reduce the rank of the resulting view
let assemblyFormat = [{
$source ``
custom<DynamicIndexList>($offsets, $static_offsets,
- "ShapedType::kDynamicStrideOrOffset")
+ "ShapedType::kDynamic")
custom<DynamicIndexList>($sizes, $static_sizes,
- "ShapedType::kDynamicSize")
+ "ShapedType::kDynamic")
custom<DynamicIndexList>($strides, $static_strides,
- "ShapedType::kDynamicStrideOrOffset")
+ "ShapedType::kDynamic")
attr-dict `:` type($source) `to` type($result)
}];
/// Alias type for extent tensors.
RankedTensorType getExtentTensorType(MLIRContext *ctx,
- int64_t rank = ShapedType::kDynamicSize);
+ int64_t rank = ShapedType::kDynamic);
// Check if a type is an extent tensor, e.g., tensor<?xindex>.
bool isExtentTensorType(Type);
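A short sketch of the declaration above: the defaulted rank yields the
dynamically sized form (`ctx` is an assumed MLIRContext *):

    RankedTensorType extentTy = getExtentTensorType(ctx); // tensor<?xindex>
    RankedTensorType rank3Ty =
        getExtentTensorType(ctx, /*rank=*/3);             // tensor<3xindex>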
def Shape_ExtentTensorType :
1DTensorOf<[Index]>,
- BuildableType<"::mlir::RankedTensorType::get({ShapedType::kDynamicSize}, "
+ BuildableType<"::mlir::RankedTensorType::get({ShapedType::kDynamic}, "
"$_builder.getType<::mlir::IndexType>())"> {
let description = [{
The extent tensor is a tensor of rank one with arbitrarily many index
The representation based on offsets, sizes and strides supports a
partially-static specification via attributes specified through the
`static_offsets`, `static_sizes` and `static_strides` arguments. A special
- sentinel value ShapedType::kDynamicSize and
- ShapedType::kDynamicStrideOrOffset encodes that the corresponding entry has
+ sentinel value ShapedType::kDynamic encodes that the corresponding entry has
a dynamic value.
After buffer allocation, the "extract_slice" op is expected to lower into a
let assemblyFormat = [{
$source ``
custom<DynamicIndexList>($offsets, $static_offsets,
- "ShapedType::kDynamicStrideOrOffset")
+ "ShapedType::kDynamic")
custom<DynamicIndexList>($sizes, $static_sizes,
- "ShapedType::kDynamicSize")
+ "ShapedType::kDynamic")
custom<DynamicIndexList>($strides, $static_strides,
- "ShapedType::kDynamicStrideOrOffset")
+ "ShapedType::kDynamic")
attr-dict `:` type($source) `to` type($result)
}];
The representation based on offsets, sizes and strides supports a
partially-static specification via attributes specified through the
`static_offsets`, `static_sizes` and `static_strides` arguments. A special
- sentinel value ShapedType::kDynamicSize and
- ShapedType::kDynamicStrideOrOffset encodes that the corresponding entry has
+ sentinel value ShapedType::kDynamic encodes that the corresponding entry has
a dynamic value.
After buffer allocation, the "insert_slice" op is expected to lower into a
let assemblyFormat = [{
$source `into` $dest ``
custom<DynamicIndexList>($offsets, $static_offsets,
- "ShapedType::kDynamicStrideOrOffset")
+ "ShapedType::kDynamic")
custom<DynamicIndexList>($sizes, $static_sizes,
- "ShapedType::kDynamicSize")
+ "ShapedType::kDynamic")
custom<DynamicIndexList>($strides, $static_strides,
- "ShapedType::kDynamicStrideOrOffset")
+ "ShapedType::kDynamic")
attr-dict `:` type($source) `into` type($dest)
}];
$source
(`nofold` $nofold^)?
`low` `` custom<DynamicIndexList>($low, $static_low,
- "ShapedType::kDynamicSize")
+ "ShapedType::kDynamic")
`high` `` custom<DynamicIndexList>($high, $static_high,
- "ShapedType::kDynamicSize")
+ "ShapedType::kDynamic")
$region attr-dict `:` type($source) `to` type($result)
}];
The representation based on offsets, sizes and strides supports a
partially-static specification via attributes specified through the
`static_offsets`, `static_sizes` and `static_strides` arguments. A special
- sentinel value ShapedType::kDynamicSize and
- ShapedType::kDynamicStrideOrOffset encodes that the corresponding entry has
+ sentinel value ShapedType::kDynamic encodes that the corresponding entry has
a dynamic value.
After buffer allocation, the "parallel_insert_slice" op is expected to lower
let assemblyFormat = [{
$source `into` $dest ``
custom<DynamicIndexList>($offsets, $static_offsets,
- "ShapedType::kDynamicStrideOrOffset")
+ "ShapedType::kDynamic")
custom<DynamicIndexList>($sizes, $static_sizes,
- "ShapedType::kDynamicSize")
+ "ShapedType::kDynamic")
custom<DynamicIndexList>($strides, $static_strides,
- "ShapedType::kDynamicStrideOrOffset")
+ "ShapedType::kDynamic")
attr-dict `:` type($source) `into` type($dest)
}];
return result;
result.hasRank = true;
- result.sizes.resize(lhs.sizes.size(), ShapedType::kDynamicSize);
+ result.sizes.resize(lhs.sizes.size(), ShapedType::kDynamic);
for (auto i : llvm::seq<unsigned>(0, result.sizes.size())) {
int64_t lhsSize = lhs.sizes[i];
int64_t rhsSize = rhs.sizes[i];
int64_t &resultSize = result.sizes[i];
- if (lhsSize == ShapedType::kDynamicSize) {
+ if (lhsSize == ShapedType::kDynamic) {
resultSize = rhsSize;
- } else if (rhsSize == ShapedType::kDynamicSize) {
+ } else if (rhsSize == ShapedType::kDynamic) {
resultSize = lhsSize;
} else if (lhsSize == rhsSize) {
resultSize = lhsSize;
}
result.hasRank = true;
- result.sizes.resize(lhs.sizes.size(), ShapedType::kDynamicSize);
+ result.sizes.resize(lhs.sizes.size(), ShapedType::kDynamic);
for (int i = 0, e = lhs.sizes.size(); i < e; i++) {
if (lhs.sizes[i] == rhs.sizes[i]) {
result.sizes[i] = lhs.sizes[i];
// Whether the value has known rank.
bool hasRank;
// If `hasRank`, the sizes along each rank. Unknown sizes are represented as
- // `ShapedType::kDynamicSize`.
+ // `ShapedType::kDynamic`.
llvm::SmallVector<int64_t> sizes;
// The dtype of a tensor.
// This is equal to nullptr if we don't know that it is a specific concrete
namespace mlir {
-/// Given a list of strides (in which MemRefType::getDynamicStrideOrOffset()
+/// Given a list of strides (in which ShapedType::kDynamic
/// represents a dynamic value), return the single result AffineMap which
/// represents the linearized strided layout map. Dimensions correspond to the
/// offset followed by the strides in order. Symbols are inserted for each
Strides must be positive and the offset must be non-negative. Both the
strides and the offset may be _dynamic_, i.e. their value may not be known
at compile time. This is expressed as a `?` in the assembly syntax and as
- `ShapedType::kDynamicStrideOrOffset` in the code. Stride and offset values
+ `ShapedType::kDynamic` in the code. Stride and offset values
must satisfy the constraints above at runtime; otherwise the behavior is
undefined.
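A hedged sketch of building the fully dynamic layout described above (this
mirrors the `makeStridedLayoutDynamic` helper later in this patch):

    // strided<[?, ?], offset: ?> for a rank-2 memref.
    auto layout = StridedLayoutAttr::get(
        ctx, /*offset=*/ShapedType::kDynamic,
        /*strides=*/{ShapedType::kDynamic, ShapedType::kDynamic});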
A shape is a list of sizes corresponding to the dimensions of the container.
If the number of dimensions in the shape is unknown, the shape is "unranked".
If the number of dimensions is known, the shape is "ranked". The sizes of the
- dimensions of the shape must be positive, or kDynamicSize (in which case the
+ dimensions of the shape must be positive, or kDynamic (in which case the
size of the dimension is dynamic, or not statically known).
}];
let methods = [
];
let extraClassDeclaration = [{
- // TODO: merge these two special values in a single one used everywhere.
- // Unfortunately, uses of `-1` have crept deep into the codebase now and are
- // hard to track.
- static constexpr int64_t kDynamicSize =
- std::numeric_limits<int64_t>::min();
- static constexpr int64_t kDynamicStrideOrOffset =
+ static constexpr int64_t kDynamic =
std::numeric_limits<int64_t>::min();
/// Whether the given dimension size indicates a dynamic dimension.
- static constexpr bool isDynamic(int64_t dSize) {
- return dSize == kDynamicSize;
+ static constexpr bool isDynamic(int64_t dValue) {
+ return dValue == kDynamic;
}
+
/// Whether the given shape has any size that indicates a dynamic dimension.
static bool isDynamicShape(ArrayRef<int64_t> dSizes) {
return any_of(dSizes, [](int64_t dSize) { return isDynamic(dSize); });
}
- static constexpr bool isDynamicStrideOrOffset(int64_t dStrideOrOffset) {
- return dStrideOrOffset == kDynamicStrideOrOffset;
- }
/// Return the number of elements present in the given shape.
static int64_t getNumElements(ArrayRef<int64_t> shape);
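The practical effect of the merge, as a before/after sketch (`offset` and
`strides` are assumed outputs of a stride computation):

    // A ranked shape with one runtime-sized dimension.
    SmallVector<int64_t> shape = {4, ShapedType::kDynamic, 8};
    assert(ShapedType::isDynamicShape(shape));

    // Before: ShapedType::isDynamicStrideOrOffset(offset);
    // After: the one predicate covers sizes, strides and offsets alike.
    bool anyDynamic = ShapedType::isDynamic(offset) ||
                      llvm::any_of(strides, ShapedType::isDynamic);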
/// symbols.
///
/// A stride specification is a list of integer values that are either static
-/// or dynamic (encoded with ShapedType::kDynamicStrideOrOffset). Strides encode
+/// or dynamic (encoded with ShapedType::kDynamic). Strides encode
/// the distance in the number of elements between successive entries along a
/// particular dimension.
LogicalResult getStridesAndOffset(MemRefType t,
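A typical call pattern after this change (hedged sketch; `memrefType` is
assumed):

    int64_t offset;
    SmallVector<int64_t, 4> strides;
    if (succeeded(getStridesAndOffset(memrefType, strides, offset)) &&
        !ShapedType::isDynamic(offset) &&
        !llvm::any_of(strides, ShapedType::isDynamic)) {
      // The layout is fully static.
    }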
/// New `Attribute getMemorySpace()` method should be used instead.
unsigned getMemorySpaceAsInt() const;
- // TODO: merge these two special values in a single one used everywhere.
- // Unfortunately, uses of `-1` have crept deep into the codebase now and are
- // hard to track.
- static int64_t getDynamicStrideOrOffset() {
- return ShapedType::kDynamicStrideOrOffset;
- }
}];
let skipDefaultBuilders = 1;
let genVerifyDecl = 1;
/// The components consist of
/// - A ranked or unranked shape with the dimension specification matching those
///   of ShapedType's getShape() (e.g., dynamic dimension represented using
-/// ShapedType::kDynamicSize)
+/// ShapedType::kDynamic)
/// - An element type, may be unset (nullptr)
/// - An attribute, may be unset (nullptr)
/// Used by ShapedType type inferences.
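A hedged sketch of filling such components during return-type inference (the
constructor form and the `inferred`/`elementType` names are assumptions):

    // Ranked 4 x ? x f32 result; the attribute is left unset.
    inferred.emplace_back(SmallVector<int64_t>{4, ShapedType::kDynamic},
                          elementType);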
`getArrayAttrMaxRanks()`[0] (resp. [1], [2]).
3. if an entry of `static_offsets` (resp. `static_sizes`,
`static_strides`) is equal to a special sentinel value, namely
- `ShapedType::kDynamicStrideOrOffset` (resp. `ShapedType::kDynamicSize`,
- `ShapedType::kDynamicStrideOrOffset`), then the corresponding entry is
+ `ShapedType::kDynamic` (resp. `ShapedType::kDynamic`,
+ `ShapedType::kDynamic`), then the corresponding entry is
a dynamic offset (resp. size, stride).
4. a variadic `offset` (resp. `sizes`, `strides`) operand must be present
for each dynamic offset (resp. size, stride).
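A sketch of consuming this convention with the interface methods visible in
this patch (`op` implements OffsetSizeAndStrideOpInterface):

    for (unsigned idx = 0, e = op.getMixedOffsets().size(); idx != e; ++idx) {
      if (op.isDynamicOffset(idx)) {
        // static_offsets[idx] == ShapedType::kDynamic; the actual offset is
        // the corresponding SSA offset operand.
      }
    }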
/*defaultImplementation=*/[{
::llvm::APInt v = *(static_offsets()
.template getAsValueRange<::mlir::IntegerAttr>().begin() + idx);
- return ::mlir::ShapedType::isDynamicStrideOrOffset(v.getSExtValue());
+ return ::mlir::ShapedType::isDynamic(v.getSExtValue());
}]
>,
InterfaceMethod<
/*defaultImplementation=*/[{
::llvm::APInt v = *(static_strides()
.template getAsValueRange<::mlir::IntegerAttr>().begin() + idx);
- return ::mlir::ShapedType::isDynamicStrideOrOffset(v.getSExtValue());
+ return ::mlir::ShapedType::isDynamic(v.getSExtValue());
}]
>,
InterfaceMethod<
assert($_op.isDynamicOffset(idx) && "expected dynamic offset");
auto numDynamic = getNumDynamicEntriesUpToIdx(
static_offsets().template cast<::mlir::ArrayAttr>(),
- ::mlir::ShapedType::isDynamicStrideOrOffset,
+ ::mlir::ShapedType::isDynamic,
idx);
return $_op.getOffsetSizeAndStrideStartOperandIndex() + numDynamic;
}]
assert($_op.isDynamicStride(idx) && "expected dynamic stride");
auto numDynamic = getNumDynamicEntriesUpToIdx(
static_strides().template cast<::mlir::ArrayAttr>(),
- ::mlir::ShapedType::isDynamicStrideOrOffset,
+ ::mlir::ShapedType::isDynamic,
idx);
return $_op.getOffsetSizeAndStrideStartOperandIndex() +
offsets().size() + sizes().size() + numDynamic;
// fit into int64_t limits.
auto parseStrideOrOffset = [&]() -> Optional<int64_t> {
if (consumeIf(Token::question))
- return ShapedType::kDynamicStrideOrOffset;
+ return ShapedType::kDynamic;
SMLoc loc = getToken().getLoc();
auto emitWrongTokenError = [&] {
if (consumeIf(Token::question)) {
if (!allowDynamic)
return emitError(loc, "expected static shape");
- dimensions.push_back(ShapedType::kDynamicSize);
+ dimensions.push_back(ShapedType::kDynamic);
} else {
int64_t value;
if (failed(parseIntegerInDimensionList(value)))
return unwrap(type).cast<ShapedType>().getDimSize(static_cast<unsigned>(dim));
}
-int64_t mlirShapedTypeGetDynamicSize() { return ShapedType::kDynamicSize; }
+int64_t mlirShapedTypeGetDynamicSize() { return ShapedType::kDynamic; }
bool mlirShapedTypeIsDynamicSize(int64_t size) {
return ShapedType::isDynamic(size);
}
bool mlirShapedTypeIsDynamicStrideOrOffset(int64_t val) {
- return ShapedType::isDynamicStrideOrOffset(val);
+ return ShapedType::isDynamic(val);
}
int64_t mlirShapedTypeGetDynamicStrideOrOffset() {
- return ShapedType::kDynamicStrideOrOffset;
+ return ShapedType::kDynamic;
}
//===----------------------------------------------------------------------===//
size_t i = pair.index();
Value index = pair.value();
Value strideOp;
- if (ShapedType::isDynamicStrideOrOffset(strides[i])) {
+ if (ShapedType::isDynamic(strides[i])) {
strideOp = rewriter.create<LLVM::MulOp>(
loc, memrefDescriptor.stride(rewriter, loc, i), byteWidthConst);
} else {
Value sgprOffset = adaptor.getSgprOffset();
if (!sgprOffset)
sgprOffset = createI32Constant(rewriter, loc, 0);
- if (ShapedType::isDynamicStrideOrOffset(offset))
+ if (ShapedType::isDynamic(offset))
sgprOffset = rewriter.create<LLVM::AddOp>(
loc, memrefDescriptor.offset(rewriter, loc), sgprOffset);
else if (offset > 0)
auto result = getStridesAndOffset(type, strides, offset);
(void)result;
assert(succeeded(result) && "unexpected failure in stride computation");
- assert(!ShapedType::isDynamicStrideOrOffset(offset) &&
+ assert(!ShapedType::isDynamic(offset) &&
"expected static offset");
- assert(!llvm::any_of(strides, ShapedType::isDynamicStrideOrOffset) &&
+ assert(!llvm::any_of(strides, ShapedType::isDynamic) &&
"expected static strides");
auto convertedType = typeConverter.convertType(type);
Value index;
if (offset != 0) // Skip if offset is zero.
- index = ShapedType::isDynamicStrideOrOffset(offset)
+ index = ShapedType::isDynamic(offset)
? memRefDescriptor.offset(rewriter, loc)
: createIndexConstant(rewriter, loc, offset);
for (int i = 0, e = indices.size(); i < e; ++i) {
Value increment = indices[i];
if (strides[i] != 1) { // Skip if stride is 1.
- Value stride = ShapedType::isDynamicStrideOrOffset(strides[i])
+ Value stride = ShapedType::isDynamic(strides[i])
? memRefDescriptor.stride(rewriter, loc, i)
: createIndexConstant(rewriter, loc, strides[i]);
increment = rewriter.create<LLVM::MulOp>(loc, increment, stride);
SmallVectorImpl<Value> &strides, Value &sizeBytes) const {
assert(isConvertibleAndHasIdentityMaps(memRefType) &&
"layout maps must have been normalized away");
- assert(count(memRefType.getShape(), ShapedType::kDynamicSize) ==
+ assert(count(memRefType.getShape(), ShapedType::kDynamic) ==
static_cast<ssize_t>(dynamicSizes.size()) &&
"dynamicSizes size doesn't match dynamic sizes count in memref shape");
sizes.reserve(memRefType.getRank());
unsigned dynamicIndex = 0;
for (int64_t size : memRefType.getShape()) {
- sizes.push_back(size == ShapedType::kDynamicSize
+ sizes.push_back(size == ShapedType::kDynamic
? dynamicSizes[dynamicIndex++]
: createIndexConstant(rewriter, loc, size));
}
if (size == 0)
continue;
bool useSizeAsStride = stride == 1;
- if (size == ShapedType::kDynamicSize)
- stride = ShapedType::kDynamicSize;
- if (stride != ShapedType::kDynamicSize)
+ if (size == ShapedType::kDynamic)
+ stride = ShapedType::kDynamic;
+ if (stride != ShapedType::kDynamic)
stride *= size;
if (useSizeAsStride)
runningStride = sizes[i];
- else if (stride == ShapedType::kDynamicSize)
+ else if (stride == ShapedType::kDynamic)
runningStride =
rewriter.create<LLVM::MulOp>(loc, runningStride, sizes[i]);
else
return false;
for (int64_t stride : strides)
- if (ShapedType::isDynamicStrideOrOffset(stride))
+ if (ShapedType::isDynamic(stride))
return false;
- return !ShapedType::isDynamicStrideOrOffset(offset);
+ return !ShapedType::isDynamic(offset);
}
/// Convert a memref type to a bare pointer to the memref element type.
static MemRefType makeStridedLayoutDynamic(MemRefType type) {
return MemRefType::Builder(type).setLayout(StridedLayoutAttr::get(
- type.getContext(), ShapedType::kDynamicStrideOrOffset,
- SmallVector<int64_t>(type.getRank(),
- ShapedType::kDynamicStrideOrOffset)));
+ type.getContext(), ShapedType::kDynamic,
+ SmallVector<int64_t>(type.getRank(), ShapedType::kDynamic)));
}
/// Helper function to extract the operand types that are passed to the
namespace {
bool isStaticStrideOrOffset(int64_t strideOrOffset) {
- return !ShapedType::isDynamicStrideOrOffset(strideOrOffset);
+ return !ShapedType::isDynamic(strideOrOffset);
}
LLVM::LLVMFuncOp getFreeFn(LLVMTypeConverter *typeConverter, ModuleOp module) {
[&](MemRefType type, function_ref<Value()> getDynamicSize) -> Value {
// Compute number of elements.
int64_t size = type.getShape()[0];
- Value numElements = ((size == ShapedType::kDynamicSize)
+ Value numElements = ((size == ShapedType::kDynamic)
? getDynamicSize()
: createIndexConstant(rewriter, loc, size));
Type indexType = getIndexType();
Value stride = nullptr;
int64_t targetRank = targetMemRefType.getRank();
for (auto i : llvm::reverse(llvm::seq<int64_t>(0, targetRank))) {
- if (!ShapedType::isDynamicStrideOrOffset(strides[i])) {
+ if (!ShapedType::isDynamic(strides[i])) {
// If the stride for this dimension is dynamic, then use the product
// of the sizes of the inner dimensions.
stride = createIndexConstant(rewriter, loc, strides[i]);
// Offset.
auto llvmIndexType = typeConverter->convertType(rewriter.getIndexType());
- if (!ShapedType::isDynamicStrideOrOffset(offset)) {
+ if (!ShapedType::isDynamic(offset)) {
targetMemRef.setConstantOffset(rewriter, loc, offset);
} else {
Value baseOffset = sourceMemRef.offset(rewriter, loc);
// constants.
int64_t staticSize =
subViewOp.getSource().getType().cast<MemRefType>().getShape()[i];
- if (staticSize != ShapedType::kDynamicSize) {
+ if (staticSize != ShapedType::kDynamic) {
size = rewriter.create<LLVM::ConstantOp>(
loc, llvmIndexType, rewriter.getI64IntegerAttr(staticSize));
} else {
: rewriter.create<LLVM::ConstantOp>(
loc, llvmIndexType,
rewriter.getI64IntegerAttr(subViewOp.getStaticSize(i)));
- if (!ShapedType::isDynamicStrideOrOffset(strides[i])) {
+ if (!ShapedType::isDynamic(strides[i])) {
stride = rewriter.create<LLVM::ConstantOp>(
loc, llvmIndexType, rewriter.getI64IntegerAttr(strides[i]));
} else {
ArrayRef<int64_t> strides, Value nextSize,
Value runningStride, unsigned idx) const {
assert(idx < strides.size());
- if (!ShapedType::isDynamicStrideOrOffset(strides[idx]))
+ if (!ShapedType::isDynamic(strides[idx]))
return createIndexConstant(rewriter, loc, strides[idx]);
if (nextSize)
return runningStride
bool isDynamic) {
if (isDynamic) {
// TODO (natashaknk): Make dynamic intermediate shape not always be rank-1
- intermediateShape = {ShapedType::kDynamicSize};
+ intermediateShape = {ShapedType::kDynamic};
return true;
}
SmallVector<int64_t, 2> genericShape;
for (int i = 0; i < rank; i++) {
int64_t dim = multiples[i];
- genericShape.push_back(dim == -1 ? ShapedType::kDynamicSize : dim);
+ genericShape.push_back(dim == -1 ? ShapedType::kDynamic : dim);
genericShape.push_back(inputShape[i]);
}
for (const auto &i : llvm::enumerate(sliceOp.getSize())) {
int64_t size = i.value().cast<IntegerAttr>().getInt();
size_t index = i.index();
- sizes.push_back(size == -1 ? ShapedType::kDynamicSize : size);
+ sizes.push_back(size == -1 ? ShapedType::kDynamic : size);
if (!ShapedType::isDynamic(sizes.back()))
continue;
strides.back() != 1)
return llvm::None;
int64_t stride = strides[strides.size() - 2];
- if (stride == ShapedType::kDynamicStrideOrOffset)
+ if (stride == ShapedType::kDynamic)
return llvm::None;
return stride;
}
#include "mlir/Dialect/Vector/Transforms/VectorTransforms.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/TypeUtilities.h"
-#include "mlir/Support/MathExtras.h"
#include "mlir/Target/LLVMIR/TypeToLLVM.h"
#include "mlir/Transforms/DialectConversion.h"
auto sizes = memRefType.getShape();
for (int index = 0, e = strides.size() - 1; index < e; ++index) {
if (ShapedType::isDynamic(sizes[index + 1]) ||
- ShapedType::isDynamicStrideOrOffset(strides[index]) ||
- ShapedType::isDynamicStrideOrOffset(strides[index + 1]))
+ ShapedType::isDynamic(strides[index]) ||
+ ShapedType::isDynamic(strides[index + 1]))
return None;
if (strides[index] != strides[index + 1] * sizes[index + 1])
return None;
if (!targetStrides)
return failure();
// Only support static strides for now, regardless of contiguity.
- if (llvm::any_of(*targetStrides, ShapedType::isDynamicStrideOrOffset))
+ if (llvm::any_of(*targetStrides, ShapedType::isDynamic))
return failure();
auto int64Ty = IntegerType::get(rewriter.getContext(), 64);
// If no constant bound is found, then it can always be bound by the
// memref's dim size if the latter has a constant size along this dim.
auto dimSize = memRefType.getDimSize(d);
- if (dimSize == ShapedType::kDynamicSize)
+ if (dimSize == ShapedType::kDynamic)
return None;
diffConstant = dimSize;
// Lower bound becomes 0.
// Put together alloc operands for any dynamic dimensions of the memref.
SmallVector<Value, 4> allocOperands;
for (const auto &dim : llvm::enumerate(oldMemRefType.getShape())) {
- if (dim.value() == ShapedType::kDynamicSize)
+ if (dim.value() == ShapedType::kDynamic)
allocOperands.push_back(bOuter.createOrFold<memref::DimOp>(
forOp.getLoc(), oldMemRef, dim.index()));
}
bool isDynDim =
isNormalizedMemRefDynamicDim(d, layoutMap, memrefTypeDynDims, context);
if (isDynDim) {
- newShape[d] = ShapedType::kDynamicSize;
+ newShape[d] = ShapedType::kDynamic;
} else {
// The lower bound for the shape is always zero.
Optional<int64_t> ubConst =
auto memorySpaceAttr = IntegerAttr::get(
IntegerType::get(tensorType.getContext(), 64), memorySpace);
auto rankedTensorType = tensorType.cast<RankedTensorType>();
- int64_t dynamicOffset = ShapedType::kDynamicStrideOrOffset;
+ int64_t dynamicOffset = ShapedType::kDynamic;
SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
- ShapedType::kDynamicStrideOrOffset);
+ ShapedType::kDynamic);
auto stridedLayout = StridedLayoutAttr::get(tensorType.getContext(),
dynamicOffset, dynamicStrides);
return MemRefType::get(rankedTensorType.getShape(),
failed(getStridesAndOffset(target, targetStrides, targetOffset)))
return false;
auto dynamicToStatic = [](int64_t a, int64_t b) {
- return a == MemRefType::getDynamicStrideOrOffset() &&
- b != MemRefType::getDynamicStrideOrOffset();
+ return ShapedType::isDynamic(a) && !ShapedType::isDynamic(b);
};
if (dynamicToStatic(sourceOffset, targetOffset))
return false;
auto loc = value.getLoc();
SmallVector<Value, 4> dynamicOperands;
for (int i = 0; i < destType.getRank(); ++i) {
- if (destType.getShape()[i] != ShapedType::kDynamicSize)
+ if (destType.getShape()[i] != ShapedType::kDynamic)
continue;
auto index = b.createOrFold<arith::ConstantIndexOp>(loc, i);
Value size = b.create<memref::DimOp>(loc, value, index);
SmallVector<int64_t, 4> strides;
if (failed(getStridesAndOffset(type, strides, offset)))
return false;
- if (!llvm::all_of(strides, ShapedType::isDynamicStrideOrOffset))
+ if (!llvm::all_of(strides, ShapedType::isDynamic))
return false;
- if (!ShapedType::isDynamicStrideOrOffset(offset))
+ if (!ShapedType::isDynamic(offset))
return false;
return true;
}
// We parsed a generic dimension list, but vectors only support two forms:
// - single non-dynamic entry in the list (fixed vector);
- // - two elements, the first dynamic (indicated by ShapedType::kDynamicSize)
+ // - two elements, the first dynamic (indicated by ShapedType::kDynamic)
// and the second
// non-dynamic (scalable vector).
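//   e.g. `vector<4xf32>`   -> dims == {4}
//        `vector<[4]xf32>` -> dims == {ShapedType::kDynamic, 4}
//   (a hedged illustration of the two accepted forms)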
if (dims.empty() || dims.size() > 2 ||
}
staticSplitPoint =
- parser.getBuilder().getI64IntegerAttr(ShapedType::kDynamicSize);
+ parser.getBuilder().getI64IntegerAttr(ShapedType::kDynamic);
}
result.addAttribute(
void SplitOp::print(OpAsmPrinter &printer) {
printer << " " << getTarget() << " after ";
int64_t staticSplitSize = static_cast<int64_t>(getStaticSplitPoint());
- if (staticSplitSize != ShapedType::kDynamicSize)
+ if (staticSplitSize != ShapedType::kDynamic)
printer << staticSplitSize;
else
printer << getDynamicSplitPoint();
LogicalResult SplitOp::verify() {
if ((static_cast<int64_t>(getStaticSplitPoint()) !=
- ShapedType::kDynamicSize) ^
+ ShapedType::kDynamic) ^
(getDynamicSplitPoint() == nullptr)) {
return emitOpError() << "expects either a dynamic or a static split "
"point to be provided";
unsigned dynamicPos = 0;
Builder builder(getContext());
for (int64_t size : tileSizes) {
- if (size == ShapedType::kDynamicSize) {
+ if (size == ShapedType::kDynamic) {
results.push_back(dynamic[dynamicPos++]);
} else {
results.push_back(builder.getIndexAttr(size));
if (parser.parseOperand(target) ||
parser.resolveOperand(target, pdlOperationType, result.operands) ||
parseDynamicIndexList(parser, dynamicSizes, staticSizes,
- ShapedType::kDynamicSize) ||
+ ShapedType::kDynamic) ||
parser.resolveOperands(dynamicSizes, pdlOperationType, result.operands) ||
parser.parseOptionalAttrDict(result.attributes))
return ParseResult::failure();
void TileOp::print(OpAsmPrinter &p) {
p << ' ' << getTarget();
printDynamicIndexList(p, getOperation(), getDynamicSizes(), getStaticSizes(),
- ShapedType::kDynamicSize);
+ ShapedType::kDynamic);
p.printOptionalAttrDict((*this)->getAttrs(), {getStaticSizesAttrName()});
}
SmallVector<int64_t> staticTileSizes;
SmallVector<Value> dynamicTileSizes;
dispatchIndexOpFoldResults(mixedTileSizes, dynamicTileSizes, staticTileSizes,
- ShapedType::kDynamicSize);
+ ShapedType::kDynamic);
// Call the default builder which sets up the proper operands segment sizes
// attributes for multiple variadic operands. In the absence of this, horrible
// bugs ensue.
SmallVector<int64_t> staticNumThreads;
SmallVector<Value> dynamicNumThreads;
dispatchIndexOpFoldResults(mixedNumThreads, dynamicNumThreads,
- staticNumThreads, ShapedType::kDynamicSize);
+ staticNumThreads, ShapedType::kDynamic);
// Call the default builder which sets up the proper operands segment sizes
// attributes for multiple variadic operands. In the absence of this, horrible
// bugs ensue.
unsigned dynamicPos = 0;
Builder builder(getContext());
for (int64_t size : tileSizes) {
- if (size == ShapedType::kDynamicSize) {
+ if (size == ShapedType::kDynamic) {
results.push_back(dynamic[dynamicPos++]);
} else {
results.push_back(builder.getIndexAttr(size));
if (parser.parseOperand(target) ||
parser.resolveOperand(target, pdlOperationType, result.operands) ||
parseDynamicIndexList(parser, dynamicSizes, staticSizes,
- ShapedType::kDynamicSize) ||
+ ShapedType::kDynamic) ||
parser.resolveOperands(dynamicSizes, pdlOperationType, result.operands) ||
parser.parseOptionalAttrDict(result.attributes))
return ParseResult::failure();
void TileToScfForOp::print(OpAsmPrinter &p) {
p << ' ' << getTarget();
printDynamicIndexList(p, getOperation(), getDynamicSizes(), getStaticSizes(),
- ShapedType::kDynamicSize);
+ ShapedType::kDynamic);
p.printOptionalAttrDict((*this)->getAttrs(), {getStaticSizesAttrName()});
}
modifiedOutput = true;
SmallVector<Value> dynamicDims;
for (const auto &dim : llvm::enumerate(operandType.getShape())) {
- if (dim.value() != ShapedType::kDynamicSize)
+ if (dim.value() != ShapedType::kDynamic)
continue;
dynamicDims.push_back(rewriter.createOrFold<tensor::DimOp>(
loc, operandVal, dim.index()));
continue;
unsigned rank = tensorType.getRank();
SmallVector<int64_t, 4> staticOffsetsVector(
- rank, ShapedType::kDynamicStrideOrOffset);
- SmallVector<int64_t, 4> staticSizesVector(rank, ShapedType::kDynamicSize);
+ rank, ShapedType::kDynamic);
+ SmallVector<int64_t, 4> staticSizesVector(rank, ShapedType::kDynamic);
SmallVector<int64_t, 4> staticStridesVector(
- rank, ShapedType::kDynamicStrideOrOffset);
+ rank, ShapedType::kDynamic);
resultTypes.push_back(tensor::ExtractSliceOp::inferResultType(
tensorType, staticOffsetsVector, staticSizesVector,
staticStridesVector));
// Create the packed tensor<?x?x..?xtransposedShape> into which we amortize
// padding.
- SmallVector<int64_t> packedShape(nPackedLoops, ShapedType::kDynamicSize);
+ SmallVector<int64_t> packedShape(nPackedLoops, ShapedType::kDynamic);
// TODO: go grab dims when necessary, for now tensor::PadOp returns a static
// tensor.
llvm::append_range(packedShape, transposedTensorType->getShape());
// Fallback dynamic buffer.
auto dynamicBufferType =
- MemRefType::get(ShapedType::kDynamicSize, b.getIntegerType(8));
+ MemRefType::get(ShapedType::kDynamic, b.getIntegerType(8));
Value mul = b.createOrFold<arith::MulIOp>(
b.create<arith::ConstantIndexOp>(width), allocSize);
if (options.useAlloca)
Value buffer = allocBuffer(b, options, viewType.getElementType(), allocSize,
layout, alignment);
SmallVector<int64_t, 4> dynSizes(boundingSubViewSize.size(),
- ShapedType::kDynamicSize);
+ ShapedType::kDynamic);
Value view = b.createOrFold<memref::ViewOp>(
MemRefType::get(dynSizes, viewType.getElementType()), buffer, zero,
boundingSubViewSize);
partialSizes.push_back(
b.createOrFold<memref::DimOp>(loc, subView, resultDimIdx++));
}
- SmallVector<int64_t, 4> dynSizes(fullSizes.size(), ShapedType::kDynamicSize);
+ SmallVector<int64_t, 4> dynSizes(fullSizes.size(), ShapedType::kDynamic);
// If a callback is not specified, then use the default implementation for
// allocating the promoted buffer.
Optional<Value> fullLocalView = allocationFn(b, subView, fullSizes, layout);
unsigned reductionDim = dims[0];
SmallVector<int64_t, 4> loopRanges = op.getStaticLoopRanges();
int64_t reductionDimSize = loopRanges[reductionDim];
- if (reductionDimSize == ShapedType::kDynamicSize ||
+ if (reductionDimSize == ShapedType::kDynamic ||
reductionDimSize % ratio != 0)
return b.notifyMatchFailure(
op, "Reduction dimension not divisible by split ratio");
unsigned reductionDimPos = dims[0];
SmallVector<int64_t> loopRanges = op.getStaticLoopRanges();
int64_t reductionDimSize = loopRanges[reductionDimPos];
- if (reductionDimSize == ShapedType::kDynamicSize ||
+ if (reductionDimSize == ShapedType::kDynamic ||
reductionDimSize % splitFactor != 0 ||
insertSplitDimension >= loopRanges.size())
return b.notifyMatchFailure(
for (int64_t idx : llvm::seq<int64_t>(0, oldShape.size() + 1)) {
if (idx == insertSplitDimension) {
dispatchIndexOpFoldResults(sizes[idx], dynamicDims, newOutputShape,
- ShapedType::kDynamicStrideOrOffset);
+ ShapedType::kDynamic);
continue;
}
int64_t oldIdx = idx < insertSplitDimension ? idx : idx - 1;
SmallVector<Value, 4> dynOperands;
auto shapedType = val.getType().cast<ShapedType>();
for (const auto &dim : llvm::enumerate(shapedType.getShape())) {
- if (dim.value() == ShapedType::kDynamicSize)
+ if (dim.value() == ShapedType::kDynamic)
dynOperands.push_back(createOrFoldDimOp(b, loc, val, dim.index()));
}
return dynOperands;
namespace saturated_arith {
struct Wrapper {
static Wrapper stride(int64_t v) {
- return (ShapedType::isDynamicStrideOrOffset(v)) ? Wrapper{true, 0}
+ return (ShapedType::isDynamic(v)) ? Wrapper{true, 0}
: Wrapper{false, v};
}
static Wrapper offset(int64_t v) {
- return (ShapedType::isDynamicStrideOrOffset(v)) ? Wrapper{true, 0}
+ return (ShapedType::isDynamic(v)) ? Wrapper{true, 0}
: Wrapper{false, v};
}
static Wrapper size(int64_t v) {
return (ShapedType::isDynamic(v)) ? Wrapper{true, 0} : Wrapper{false, v};
}
int64_t asOffset() {
- return saturated ? ShapedType::kDynamicStrideOrOffset : v;
+ return saturated ? ShapedType::kDynamic : v;
}
- int64_t asSize() { return saturated ? ShapedType::kDynamicSize : v; }
+ int64_t asSize() { return saturated ? ShapedType::kDynamic : v; }
int64_t asStride() {
- return saturated ? ShapedType::kDynamicStrideOrOffset : v;
+ return saturated ? ShapedType::kDynamic : v;
}
bool operator==(Wrapper other) {
return (saturated && other.saturated) ||
/// - `memRefTy == memref<?x?xf32, strided<[?, 1], offset: ?>>`
/// - `getAttributes == getConstantStrides` (i.e., a wrapper around
/// `getStridesAndOffset`), and
-/// - `isDynamic == isDynamicStrideOrOffset`
+/// - `isDynamic == ShapedType::isDynamic`
/// Will yield: `values == [2, 1]`
static void constifyIndexValues(
SmallVectorImpl<OpFoldResult> &values, MemRefType memRefTy,
newShapeConstants.push_back(constantIndexOp.value());
} else {
// Dynamic shape dimension not folded; copy dynamicSize from old memref.
- newShapeConstants.push_back(ShapedType::kDynamicSize);
+ newShapeConstants.push_back(ShapedType::kDynamic);
dynamicSizes.push_back(dynamicSize);
}
dynamicDimPos++;
// If cast is towards more static offset along any dimension, don't fold.
if (sourceOffset != resultOffset)
- if (ShapedType::isDynamicStrideOrOffset(sourceOffset) &&
- !ShapedType::isDynamicStrideOrOffset(resultOffset))
+ if (ShapedType::isDynamic(sourceOffset) &&
+ !ShapedType::isDynamic(resultOffset))
return false;
// If cast is towards more static strides along any dimension, don't fold.
for (auto it : llvm::zip(sourceStrides, resultStrides)) {
auto ss = std::get<0>(it), st = std::get<1>(it);
if (ss != st)
- if (ShapedType::isDynamicStrideOrOffset(ss) &&
- !ShapedType::isDynamicStrideOrOffset(st))
+ if (ShapedType::isDynamic(ss) &&
+ !ShapedType::isDynamic(st))
return false;
}
// same. They are also compatible if either one is dynamic (see
// description of MemRefCastOp for details).
auto checkCompatible = [](int64_t a, int64_t b) {
- return (a == MemRefType::getDynamicStrideOrOffset() ||
- b == MemRefType::getDynamicStrideOrOffset() || a == b);
+ return (ShapedType::isDynamic(a) ||
+ ShapedType::isDynamic(b) || a == b);
};
if (!checkCompatible(aOffset, bOffset))
return false;
ExtractStridedMetadataOp::getConstifiedMixedStrides() {
SmallVector<OpFoldResult> values = getAsOpFoldResult(getStrides());
constifyIndexValues(values, getSource().getType(), getContext(),
- getConstantStrides, ShapedType::isDynamicStrideOrOffset);
+ getConstantStrides, ShapedType::isDynamic);
return values;
}
OpFoldResult offsetOfr = getAsOpFoldResult(getOffset());
SmallVector<OpFoldResult> values(1, offsetOfr);
constifyIndexValues(values, getSource().getType(), getContext(),
- getConstantOffset, ShapedType::isDynamicStrideOrOffset);
+ getConstantOffset, ShapedType::isDynamic);
return values[0];
}
SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
dispatchIndexOpFoldResults(offset, dynamicOffsets, staticOffsets,
- ShapedType::kDynamicStrideOrOffset);
+ ShapedType::kDynamic);
dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
- ShapedType::kDynamicSize);
+ ShapedType::kDynamic);
dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
- ShapedType::kDynamicStrideOrOffset);
+ ShapedType::kDynamic);
build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
dynamicStrides, b.getI64ArrayAttr(staticOffsets),
b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
// Match offset in result memref type and in static_offsets attribute.
int64_t expectedOffset = extractFromI64ArrayAttr(getStaticOffsets()).front();
- if (!ShapedType::isDynamicStrideOrOffset(resultOffset) &&
- !ShapedType::isDynamicStrideOrOffset(expectedOffset) &&
+ if (!ShapedType::isDynamic(resultOffset) &&
+ !ShapedType::isDynamic(expectedOffset) &&
resultOffset != expectedOffset)
return emitError("expected result type with offset = ")
<< resultOffset << " instead of " << expectedOffset;
resultStrides, extractFromI64ArrayAttr(getStaticStrides())))) {
int64_t resultStride = std::get<0>(en.value());
int64_t expectedStride = std::get<1>(en.value());
- if (!ShapedType::isDynamicStrideOrOffset(resultStride) &&
- !ShapedType::isDynamicStrideOrOffset(expectedStride) &&
+ if (!ShapedType::isDynamic(resultStride) &&
+ !ShapedType::isDynamic(expectedStride) &&
resultStride != expectedStride)
return emitError("expected result type with stride = ")
<< expectedStride << " instead of " << resultStride
SmallVector<OpFoldResult> ReinterpretCastOp::getConstifiedMixedStrides() {
SmallVector<OpFoldResult> values = getMixedStrides();
constifyIndexValues(values, getType(), getContext(), getConstantStrides,
- ShapedType::isDynamicStrideOrOffset);
+ ShapedType::isDynamic);
return values;
}
assert(values.size() == 1 &&
"reinterpret_cast must have one and only one offset");
constifyIndexValues(values, getType(), getContext(), getConstantOffset,
- ShapedType::isDynamicStrideOrOffset);
+ ShapedType::isDynamic);
return values[0];
}
// the corresponding stride may have to be skipped. (See above comment.)
// Therefore, the result stride cannot be statically determined and must
// be dynamic.
- resultStrides.push_back(ShapedType::kDynamicStrideOrOffset);
+ resultStrides.push_back(ShapedType::kDynamic);
}
}
if (resultMemRefType) {
if (!resultMemRefType.getLayout().isIdentity())
return emitOpError("result memref type should have identity affine map");
- if (shapeSize == ShapedType::kDynamicSize)
+ if (shapeSize == ShapedType::kDynamic)
return emitOpError("cannot use shape operand with dynamic length to "
"reshape to statically-ranked memref type");
if (shapeSize != resultMemRefType.getRank())
SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
- ShapedType::kDynamicStrideOrOffset);
+ ShapedType::kDynamic);
dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
- ShapedType::kDynamicSize);
+ ShapedType::kDynamic);
dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
- ShapedType::kDynamicStrideOrOffset);
+ ShapedType::kDynamic);
return SubViewOp::inferResultType(sourceMemRefType, staticOffsets,
staticSizes, staticStrides);
}
SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
- ShapedType::kDynamicStrideOrOffset);
+ ShapedType::kDynamic);
dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
- ShapedType::kDynamicSize);
+ ShapedType::kDynamic);
dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
- ShapedType::kDynamicStrideOrOffset);
+ ShapedType::kDynamic);
return SubViewOp::inferRankReducedResultType(
resultShape, sourceRankedTensorType, staticOffsets, staticSizes,
staticStrides);
SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
- ShapedType::kDynamicStrideOrOffset);
+ ShapedType::kDynamic);
dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
- ShapedType::kDynamicSize);
+ ShapedType::kDynamic);
dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
- ShapedType::kDynamicStrideOrOffset);
+ ShapedType::kDynamic);
auto sourceMemRefType = source.getType().cast<MemRefType>();
// Structuring implementation this way avoids duplication between builders.
if (!resultType) {
detail::bindSymbolsList(rewriter.getContext(), symbols);
AffineExpr expr = symbols.front();
- values[0] = ShapedType::isDynamicStrideOrOffset(sourceOffset)
+ values[0] = ShapedType::isDynamic(sourceOffset)
? getAsOpFoldResult(newExtractStridedMetadata.getOffset())
: rewriter.getIndexAttr(sourceOffset);
SmallVector<OpFoldResult> subOffsets = subview.getMixedOffsets();
for (unsigned i = 0; i < sourceRank; ++i) {
// Compute the stride.
OpFoldResult origStride =
- ShapedType::isDynamicStrideOrOffset(sourceStrides[i])
+ ShapedType::isDynamic(sourceStrides[i])
? origStrides[i]
: OpFoldResult(rewriter.getIndexAttr(sourceStrides[i]));
strides.push_back(makeComposedFoldedAffineApply(
"getStridesAndOffset must work on valid expand_shape");
OpFoldResult origStride =
- ShapedType::isDynamicStrideOrOffset(strides[groupId])
+ ShapedType::isDynamic(strides[groupId])
? origStrides[groupId]
: builder.getIndexAttr(strides[groupId]);
int64_t innerMostDimForGroup = reassocGroup.back();
int64_t innerMostStrideForGroup = strides[innerMostDimForGroup];
collapsedStride.push_back(
- ShapedType::isDynamicStrideOrOffset(innerMostStrideForGroup)
+ ShapedType::isDynamic(innerMostStrideForGroup)
? origStrides[innerMostDimForGroup]
: builder.getIndexAttr(innerMostStrideForGroup));
unsigned reshapeRank = reshapeType.getRank();
OpFoldResult offsetOfr =
- ShapedType::isDynamicStrideOrOffset(offset)
+ ShapedType::isDynamic(offset)
? getAsOpFoldResult(newExtractStridedMetadata.getOffset())
: rewriter.getIndexAttr(offset);
return elementSize;
auto dims = memRefType.getShape();
- if (llvm::is_contained(dims, ShapedType::kDynamicSize) ||
- offset == MemRefType::getDynamicStrideOrOffset() ||
- llvm::is_contained(strides, MemRefType::getDynamicStrideOrOffset()))
+ if (llvm::is_contained(dims, ShapedType::kDynamic) ||
+ ShapedType::isDynamic(offset) ||
+ llvm::is_contained(strides, ShapedType::kDynamic))
return llvm::None;
int64_t memrefSize = -1;
int64_t offset;
SmallVector<int64_t, 4> strides;
if (failed(getStridesAndOffset(baseType, strides, offset)) ||
- llvm::is_contained(strides, MemRefType::getDynamicStrideOrOffset()) ||
- offset == MemRefType::getDynamicStrideOrOffset()) {
+ llvm::is_contained(strides, ShapedType::kDynamic) ||
+ ShapedType::isDynamic(offset)) {
return nullptr;
}
int64_t offset;
SmallVector<int64_t, 4> strides;
if (failed(getStridesAndOffset(baseType, strides, offset)) ||
- llvm::is_contained(strides, MemRefType::getDynamicStrideOrOffset()) ||
- offset == MemRefType::getDynamicStrideOrOffset()) {
+ llvm::is_contained(strides, ShapedType::kDynamic) ||
+ ShapedType::isDynamic(offset)) {
return nullptr;
}
else {
auto shapedTy = operands[0].getType().cast<ShapedType>();
int64_t rank =
- shapedTy.hasRank() ? shapedTy.getRank() : ShapedType::kDynamicSize;
+ shapedTy.hasRank() ? shapedTy.getRank() : ShapedType::kDynamic;
Type indexTy = IndexType::get(context);
Type extentTensorTy = RankedTensorType::get({rank}, indexTy);
inferredReturnTypes.assign({extentTensorTy});
// (e.g. 10 vs. 10, 10 vs. ?, or ? vs. ?), but reject direct mismatches or
// matches that would need a runtime assert (e.g. 10 vs. 20 or ? vs. 10).
for (unsigned d = 0, rank = tp1.getRank(); d < rank; d++)
- if (shape1[d] != shape2[d] && shape2[d] != ShapedType::kDynamicSize)
+ if (shape1[d] != shape2[d] && shape2[d] != ShapedType::kDynamic)
return emitError("unexpected conversion mismatch in dimension ") << d;
return success();
}
for (auto type : getInputs().getTypes()) {
auto shape = type.cast<RankedTensorType>().getShape();
for (auto dim : shape) {
- if (dim == ShapedType::kDynamicSize)
+ if (ShapedType::isDynamic(dim))
return emitError("Only statically-sized input tensors are supported.");
}
}
for (unsigned i = 0; i < rank; i++) {
auto dstDim = dstTp.getShape()[i];
if (i == concatDim) {
- if (dstDim != ShapedType::kDynamicSize) {
+ if (!ShapedType::isDynamic(dstDim)) {
unsigned sumDim = 0;
for (auto src : getInputs()) {
// If we reach here, all inputs should have static shapes.
int64_t prev = dstDim;
for (auto src : getInputs()) {
auto d = src.getType().cast<RankedTensorType>().getShape()[i];
- if (prev != ShapedType::kDynamicSize && d != prev)
+ if (!ShapedType::isDynamic(prev) && d != prev)
return emitError("All dimensions (expect for the concatenating one) "
"should be equal.");
prev = d;
int64_t dim = mtp.getShape()[0];
// We can't check the size of a dynamic dimension at compile-time, but all
// xs and ys should have a dimension not less than n at runtime.
- if (n && dim != ShapedType::kDynamicSize && dim < n.value())
+ if (n && !ShapedType::isDynamic(dim) && dim < n.value())
return emitError(llvm::formatv("xs and ys need to have a dimension >= n"
": {0} < {1}",
dim, n.value()));
auto checkDim = [&](Value v, uint64_t min, const char *message) {
MemRefType tp = v.getType().cast<MemRefType>();
int64_t dim = tp.getShape()[0];
- if (dim != ShapedType::kDynamicSize && dim < (int64_t)min) {
+ if (!ShapedType::isDynamic(dim) && dim < (int64_t)min) {
emitError(llvm::formatv("{0} got {1} < {2}", message, dim, min));
}
};
auto rank = rtp.getRank();
auto shape = rtp.getShape();
auto enc = getSparseTensorEncoding(rtp);
- auto dynShape = {ShapedType::kDynamicSize};
+ auto dynShape = {ShapedType::kDynamic};
// Scan all dimensions of current tensor.
for (int64_t d = 0; d < rank; d++) {
// This should be called only once at beginning.
} else {
// Annotated sparse tensors.
// We also need the value buffer for annotated all dense `sparse` tensor.
- auto dynShape = {ShapedType::kDynamicSize};
+ auto dynShape = {ShapedType::kDynamic};
auto sparseTp = MemRefType::get(dynShape, elementType);
valBuffer[t] = builder.create<ToValuesOp>(loc, sparseTp, tensor);
}
// expanded from the i-th dimension in srcShape.
// For example, if srcDim = 8, then the expanded shape could be <2x?x2>,
// but not <2x?x?>.
- if (staticDstShape[j] == ShapedType::kDynamicSize) {
+ if (staticDstShape[j] == ShapedType::kDynamic) {
// The expanded dimension has dynamic size. We compute the dimension
// by dividing srcDim by the product of the static dimensions.
int64_t product = 1;
for (unsigned k = start; k < start + map.size(); k++) {
- if (staticDstShape[k] != ShapedType::kDynamicSize) {
+ if (staticDstShape[k] != ShapedType::kDynamic) {
product *= staticDstShape[k];
}
}
Value mlir::sparse_tensor::genAlloca(OpBuilder &builder, Location loc, Value sz,
Type tp) {
- auto memTp = MemRefType::get({ShapedType::kDynamicSize}, tp);
+ auto memTp = MemRefType::get({ShapedType::kDynamic}, tp);
return builder.create<memref::AllocaOp>(loc, memTp, ValueRange{sz});
}
auto memTp = MemRefType::get(shape, elemTp);
SmallVector<Value> dynamicSizes;
for (unsigned i = 0, rank = tensorTp.getRank(); i < rank; i++) {
- if (shape[i] == ShapedType::kDynamicSize)
+ if (shape[i] == ShapedType::kDynamic)
dynamicSizes.push_back(sizes[i]);
}
Value mem = builder.create<memref::AllocOp>(loc, memTp, dynamicSizes);
auto mtp = v.getType().cast<MemRefType>();
if (!mtp.isDynamicDim(0)) {
auto newMtp =
- MemRefType::get({ShapedType::kDynamicSize}, mtp.getElementType());
+ MemRefType::get({ShapedType::kDynamic}, mtp.getElementType());
v = rewriter.create<memref::CastOp>(loc, newMtp, v);
}
operands.push_back(v);
Value c2 = constantIndex(rewriter, loc, 2);
auto bufferType =
- MemRefType::get({ShapedType::kDynamicSize}, value.getType());
+ MemRefType::get({ShapedType::kDynamic}, value.getType());
scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, bufferType, cond,
/*else=*/true);
// True branch.
// order. Clients of this type know what field is what from the sparse
// tensor type.
if (isCompressedDim(rType, r)) {
- fields.push_back(MemRefType::get({ShapedType::kDynamicSize}, ptrType));
- fields.push_back(MemRefType::get({ShapedType::kDynamicSize}, idxType));
+ fields.push_back(MemRefType::get({ShapedType::kDynamic}, ptrType));
+ fields.push_back(MemRefType::get({ShapedType::kDynamic}, idxType));
} else if (isSingletonDim(rType, r)) {
- fields.push_back(MemRefType::get({ShapedType::kDynamicSize}, idxType));
+ fields.push_back(MemRefType::get({ShapedType::kDynamic}, idxType));
} else {
assert(isDenseDim(rType, r)); // no fields
}
}
// The values array.
- fields.push_back(MemRefType::get({ShapedType::kDynamicSize}, eltType));
+ fields.push_back(MemRefType::get({ShapedType::kDynamic}, eltType));
assert(fields.size() == lastField);
return success();
}
/// Creates allocation operation.
static Value createAllocation(OpBuilder &builder, Location loc, Type type,
Value sz, bool enableInit) {
- auto memType = MemRefType::get({ShapedType::kDynamicSize}, type);
+ auto memType = MemRefType::get({ShapedType::kDynamic}, type);
Value buffer = builder.create<memref::AllocOp>(loc, memType, sz);
if (enableInit) {
Value fillValue =
assert(sz); // This for sure is a sparse tensor
// Generate a memref for `sz` elements of type `t`.
auto genAlloc = [&](Type t) {
- auto memTp = MemRefType::get({ShapedType::kDynamicSize}, t);
+ auto memTp = MemRefType::get({ShapedType::kDynamic}, t);
return rewriter.create<memref::AllocOp>(loc, memTp, ValueRange{*sz});
};
// Allocate temporary buffers for values/filled-switch and added.
SparseTensorEncodingAttr &enc, ShapedType stp,
Value src, unsigned i) {
auto shape = stp.getShape();
- if (shape[i] == ShapedType::kDynamicSize)
+ if (shape[i] == ShapedType::kDynamic)
return genLvlSizeCall(builder, loc, enc, src, i);
return constantIndex(builder, loc, shape[i]);
}
Location loc, ShapedType stp) {
auto shape = stp.getShape();
for (unsigned i = 0, rank = stp.getRank(); i < rank; i++) {
- uint64_t s = shape[i] == ShapedType::kDynamicSize ? 0 : shape[i];
+ uint64_t s = shape[i] == ShapedType::kDynamic ? 0 : shape[i];
sizes.push_back(constantIndex(builder, loc, s));
}
}
sizesFromSrc(builder, sizes, loc, srcs[0]);
// Sum up on the `dim` if the dimension is dynamic.
- if (dstShape[dim] != ShapedType::kDynamicSize) {
+ if (dstShape[dim] != ShapedType::kDynamic) {
// Faithfully take the static size.
sizes[dim] = constantIndex(builder, loc, dstShape[dim]);
} else {
/// `memref<$sz x $tp>`). Unlike temporary buffers on the stack,
/// this buffer must be explicitly deallocated by client.
static Value genAlloc(RewriterBase &rewriter, Location loc, Value sz, Type tp) {
- auto memTp = MemRefType::get({ShapedType::kDynamicSize}, tp);
+ auto memTp = MemRefType::get({ShapedType::kDynamic}, tp);
return rewriter.create<memref::AllocOp>(loc, memTp, ValueRange{sz});
}
Location loc = op.getLoc();
// Query values array size for the actually stored values size.
Type eltType = op.getTensor().getType().cast<ShapedType>().getElementType();
- auto resTp = MemRefType::get({ShapedType::kDynamicSize}, eltType);
+ auto resTp = MemRefType::get({ShapedType::kDynamic}, eltType);
Value values = genValuesCall(rewriter, loc, resTp, adaptor.getOperands());
rewriter.replaceOpWithNewOp<memref::DimOp>(op, values,
constantIndex(rewriter, loc, 0));
Location loc, ShapedType stp, Value tensor) {
for (const auto &d : enumerate(stp.getShape())) {
Value dim;
- if (d.value() == ShapedType::kDynamicSize)
+ if (d.value() == ShapedType::kDynamic)
dim = builder.create<tensor::DimOp>(loc, tensor, d.index());
else
dim = constantIndex(builder, loc, d.value());
const SmallVectorImpl<Value> &sizes,
SmallVectorImpl<Value> &dynSizes) {
for (const auto &d : enumerate(tp.getShape())) {
- if (d.value() == ShapedType::kDynamicSize)
+ if (d.value() == ShapedType::kDynamic)
dynSizes.push_back(sizes[d.index()]);
}
}
genReshapeDstShape(loc, rewriter, dstSizes, srcSizes, dstShape,
op.getReassociationIndices());
for (auto &d : llvm::enumerate(dstShape)) {
- if (d.value() == ShapedType::kDynamicSize)
+ if (d.value() == ShapedType::kDynamic)
dstDynSizes.push_back(dstSizes[d.index()]);
}
}
if (!rtp.hasStaticShape()) {
ArrayRef<int64_t> rShape = rtp.getShape();
for (const auto &d : llvm::enumerate(rShape)) {
- if (d.value() == ShapedType::kDynamicSize) {
+ if (d.value() == ShapedType::kDynamic) {
Value v =
createOrFoldDimOp(rewriter, loc, op.getOperand(0), d.index());
rewriter.create<tensor::DimOp>(loc, op.getOperand(0), d.index());
// Sort the COO tensor so that its elements are ordered via increasing
// indices for the storage ordering of the dst tensor.
SparseTensorEncodingAttr encSrc = getSparseTensorEncoding(srcTp);
- auto dynShape = {ShapedType::kDynamicSize};
+ auto dynShape = {ShapedType::kDynamic};
auto indTp =
MemRefType::get(dynShape, getIndexOverheadType(rewriter, encSrc));
uint64_t rank = dstTp.getRank();
.getResult(0);
ArrayRef<int64_t> dstShape = dstTp.getShape();
for (auto &d : llvm::enumerate(dstShape)) {
- if (d.value() == ShapedType::kDynamicSize) {
+ if (d.value() == ShapedType::kDynamic) {
dynSizesArray.push_back(rewriter.create<memref::LoadOp>(
loc, dimSizes, constantIndex(rewriter, loc, d.index())));
}
Value tensor = lhs->get();
Location loc = op.getLoc();
if (atStart) {
- auto dynShape = {ShapedType::kDynamicSize};
+ auto dynShape = {ShapedType::kDynamic};
Type etp = tensor.getType().cast<ShapedType>().getElementType();
Type t1 = MemRefType::get(dynShape, etp);
Type t2 = MemRefType::get(dynShape, builder.getI1Type());
SmallVector<int64_t> staticShape;
SmallVector<Value> dynamicSizes;
dispatchIndexOpFoldResults(sizes, dynamicSizes, staticShape,
- ShapedType::kDynamicSize);
+ ShapedType::kDynamic);
build(builder, result, staticShape, elementType, dynamicSizes, encoding);
}
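`dispatchIndexOpFoldResults` is the workhorse behind these builders: it splits a mixed static/dynamic list into an `int64_t` vector, writing the sentinel (now always `ShapedType::kDynamic`) at each dynamic position, plus a parallel list of SSA values. A sketch of the invariant a caller can rely on; `sizes` is assumed to hold one attribute, one `Value`, and one attribute:

// If sizes ~ {4, %n, 8}, then afterwards:
//   staticShape == {4, kDynamic, 8} and dynamicSizes == {%n}.
SmallVector<int64_t> staticShape;
SmallVector<Value> dynamicSizes;
dispatchIndexOpFoldResults(sizes, dynamicSizes, staticShape,
                           ShapedType::kDynamic);
assert(llvm::count(staticShape, ShapedType::kDynamic) ==
       (int64_t)dynamicSizes.size());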
}
APInt index;
if (!matchPattern(*operandsIt, m_ConstantInt(&index))) {
- newShape.push_back(ShapedType::kDynamicSize);
+ newShape.push_back(ShapedType::kDynamic);
newOperands.push_back(*operandsIt++);
continue;
}
unsigned dim = m.getNumResults();
auto band = shape.slice(currentDim, dim);
int64_t size = 1;
- if (llvm::is_contained(band, ShapedType::kDynamicSize))
- size = ShapedType::kDynamicSize;
+ if (llvm::is_contained(band, ShapedType::kDynamic))
+ size = ShapedType::kDynamic;
else
for (unsigned d = 0; d < dim; ++d)
size *= shape[currentDim + d];
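The band computation above treats `kDynamic` as absorbing: one dynamic extent poisons the whole product. Restated as a standalone helper (a sketch, equivalent to the loop in the hunk):

// Collapse a contiguous band of extents into a single extent; any dynamic
// extent makes the collapsed extent dynamic.
static int64_t collapseBand(ArrayRef<int64_t> band) {
  int64_t size = 1;
  for (int64_t extent : band) {
    if (ShapedType::isDynamic(extent))
      return ShapedType::kDynamic;
    size *= extent;
  }
  return size;
}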
SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
- ShapedType::kDynamicStrideOrOffset);
+ ShapedType::kDynamic);
dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
- ShapedType::kDynamicSize);
+ ShapedType::kDynamic);
dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
- ShapedType::kDynamicStrideOrOffset);
+ ShapedType::kDynamic);
return ExtractSliceOp::inferResultType(sourceShapedTensorType, staticOffsets,
staticSizes, staticStrides);
}
SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
- ShapedType::kDynamicStrideOrOffset);
+ ShapedType::kDynamic);
dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
- ShapedType::kDynamicSize);
+ ShapedType::kDynamic);
dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
- ShapedType::kDynamicStrideOrOffset);
+ ShapedType::kDynamic);
return ExtractSliceOp::inferCanonicalRankReducedResultType(
desiredResultRank, sourceRankedTensorType, staticOffsets, staticSizes,
staticStrides);
SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
- ShapedType::kDynamicStrideOrOffset);
+ ShapedType::kDynamic);
dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
- ShapedType::kDynamicSize);
+ ShapedType::kDynamic);
dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
- ShapedType::kDynamicStrideOrOffset);
+ ShapedType::kDynamic);
auto sourceRankedTensorType = source.getType().cast<RankedTensorType>();
// Structuring the implementation this way avoids duplication between builders.
if (!resultType) {
// Check if there are any dynamic parts, which are not supported.
auto offsets = extractFromI64ArrayAttr(op.getStaticOffsets());
- if (llvm::is_contained(offsets, ShapedType::kDynamicStrideOrOffset))
+ if (llvm::is_contained(offsets, ShapedType::kDynamic))
return failure();
auto sizes = extractFromI64ArrayAttr(op.getStaticSizes());
- if (llvm::is_contained(sizes, ShapedType::kDynamicSize))
+ if (llvm::is_contained(sizes, ShapedType::kDynamic))
return failure();
auto strides = extractFromI64ArrayAttr(op.getStaticStrides());
- if (llvm::is_contained(strides, ShapedType::kDynamicStrideOrOffset))
+ if (llvm::is_contained(strides, ShapedType::kDynamic))
return failure();
// Compute the stride for each dimension.
SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
- ShapedType::kDynamicStrideOrOffset);
+ ShapedType::kDynamic);
dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
- ShapedType::kDynamicSize);
+ ShapedType::kDynamic);
dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
- ShapedType::kDynamicStrideOrOffset);
+ ShapedType::kDynamic);
build(b, result, dest.getType(), source, dest, dynamicOffsets, dynamicSizes,
dynamicStrides, b.getI64ArrayAttr(staticOffsets),
b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
SmallVector<OpFoldResult> mixedOffsets(insertSliceOp.getMixedOffsets());
SmallVector<OpFoldResult> mixedSizes(insertSliceOp.getMixedSizes());
SmallVector<OpFoldResult> mixedStrides(insertSliceOp.getMixedStrides());
- canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset);
+ canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamic);
canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic);
- canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset);
+ canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamic);
// Create the new op in canonical form.
auto sourceType = ExtractSliceOp::inferCanonicalRankReducedResultType(
SmallVector<int64_t, 4> inferredShape;
for (auto i : llvm::seq<unsigned>(0, rank)) {
if (sourceType.isDynamicDim(i) ||
- staticLow[i] == ShapedType::kDynamicSize ||
- staticHigh[i] == ShapedType::kDynamicSize) {
- inferredShape.push_back(resultShape.empty() ? ShapedType::kDynamicSize
+ staticLow[i] == ShapedType::kDynamic ||
+ staticHigh[i] == ShapedType::kDynamic) {
+ inferredShape.push_back(resultShape.empty() ? ShapedType::kDynamic
: resultShape[i]);
} else {
int64_t size = sourceType.getDimSize(i) + staticLow[i] + staticHigh[i];
assert((resultShape.empty() || size == resultShape[i] ||
- resultShape[i] == ShapedType::kDynamicSize) &&
+ resultShape[i] == ShapedType::kDynamic) &&
"mismatch between inferred shape and result shape");
inferredShape.push_back(size);
}
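The PadOp inference above is per-dimension interval arithmetic: the result extent is `source + low + high` when all three are static, and `kDynamic` otherwise. The rule in isolation (illustrative only, not the actual builder):

static int64_t inferPaddedDim(int64_t src, int64_t low, int64_t high) {
  if (ShapedType::isDynamic(src) || ShapedType::isDynamic(low) ||
      ShapedType::isDynamic(high))
    return ShapedType::kDynamic;
  return src + low + high;
}
// inferPaddedDim(10, 1, 2) == 13
// inferPaddedDim(ShapedType::kDynamic, 1, 2) == ShapedType::kDynamic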
ArrayRef<NamedAttribute> attrs) {
auto sourceType = source.getType().cast<RankedTensorType>();
unsigned rank = sourceType.getRank();
- SmallVector<int64_t, 4> staticVector(rank, ShapedType::kDynamicSize);
+ SmallVector<int64_t, 4> staticVector(rank, ShapedType::kDynamic);
build(b, result, source, staticVector, staticVector, low, high, nofold,
attrs);
}
// dynamic (i.e., not a constant), dynamicLow and dynamicHigh will grow by one
// value as well.
dispatchIndexOpFoldResults(low, dynamicLow, staticLow,
- ShapedType::kDynamicSize);
+ ShapedType::kDynamic);
dispatchIndexOpFoldResults(high, dynamicHigh, staticHigh,
- ShapedType::kDynamicSize);
+ ShapedType::kDynamic);
if (!resultType) {
resultType = PadOp::inferResultType(sourceType, staticLow, staticHigh);
}
SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
- ShapedType::kDynamicStrideOrOffset);
+ ShapedType::kDynamic);
dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
- ShapedType::kDynamicSize);
+ ShapedType::kDynamic);
dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
- ShapedType::kDynamicStrideOrOffset);
+ ShapedType::kDynamic);
build(b, result, {}, source, dest, dynamicOffsets, dynamicSizes,
dynamicStrides, b.getI64ArrayAttr(staticOffsets),
b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
if (auto constInt = getConstantIntValue(val)) {
staticIndices.push_back(*constInt);
} else {
- staticIndices.push_back(ShapedType::kDynamicSize);
+ staticIndices.push_back(ShapedType::kDynamic);
dynIndices.push_back(val);
}
};
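The lambda above is the per-element form of the same dispatch: fold-able constants land in `staticIndices`, while everything else records the sentinel plus the `Value`. A sketch of the resulting invariant, assuming every index operand has been fed through the lambda:

// After dispatching all operands:
//   - staticIndices has one entry per operand;
//   - each kDynamic entry corresponds, in order, to one entry of dynIndices.
assert(llvm::count(staticIndices, ShapedType::kDynamic) ==
       (int64_t)dynIndices.size());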
// The shape of the result can be obtained from the sizes passed in.
SmallVector<Value> dynDims;
SmallVector<int64_t> shape;
- dispatchIndexOpFoldResults(sizes, dynDims, shape, ShapedType::kDynamicSize);
+ dispatchIndexOpFoldResults(sizes, dynDims, shape, ShapedType::kDynamic);
RankedTensorType resultType =
RankedTensorType::get(shape, padOp.getResultType().getElementType());
auto tensorTy = rankedTensor.getType().cast<RankedTensorType>();
SmallVector<Value> dynamicDims;
for (const auto &en : llvm::enumerate(tensorTy.getShape())) {
- if (en.value() == ShapedType::kDynamicSize)
+ if (en.value() == ShapedType::kDynamic)
dynamicDims.push_back(
b.create<tensor::DimOp>(loc, rankedTensor, en.index()));
}
// Copy the operand's rank.
if (!hasRankedInput)
- outputShape.resize(operandShape.getRank(), ShapedType::kDynamicSize);
+ outputShape.resize(operandShape.getRank(), ShapedType::kDynamic);
// Copy the static size of each non-axis dimension, checking for mismatches.
for (int i = 0, s = operandShape.getRank(); i < s; i++) {
if (i == axis || operandShape.isDynamicDim(i))
continue;
- if (outputShape[i] == ShapedType::kDynamicSize)
+ if (outputShape[i] == ShapedType::kDynamic)
outputShape[i] = operandShape.getDimSize(i);
if (outputShape[i] != operandShape.getDimSize(i))
return failure();
// We need to know the length of the concatenation axis of all inputs to
// determine the dimension size of the output shape.
if (!operandShape.hasRank() || operandShape.isDynamicDim(axis)) {
- concatDimSize = ShapedType::kDynamicSize;
+ concatDimSize = ShapedType::kDynamic;
break;
}
// Start with a fully dynamic output shape.
SmallVector<int64_t> outShape;
- outShape.resize(2, ShapedType::kDynamicSize);
+ outShape.resize(2, ShapedType::kDynamic);
if (inputShape.hasRank()) {
outShape[0] = inputShape.getDimSize(0);
}
if (biasShape.hasRank()) {
- outShape[1] = outShape[1] == ShapedType::kDynamicSize
+ outShape[1] = outShape[1] == ShapedType::kDynamic
? biasShape.getDimSize(0)
: outShape[1];
}
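Shape inference for the fully-connected op merges partial knowledge: a dimension stays `kDynamic` until a ranked operand pins it down, and an already-known extent wins. The merge rule factored out (a sketch; `refineDim` is not a helper in the patch):

// Keep a known extent; otherwise adopt the candidate (which may itself
// be kDynamic).
static int64_t refineDim(int64_t current, int64_t candidate) {
  return ShapedType::isDynamic(current) ? candidate : current;
}
// outShape[1] = refineDim(outShape[1], biasShape.getDimSize(0));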
// Start with a fully dynamic output shape.
SmallVector<int64_t> outShape;
- outShape.resize(3, ShapedType::kDynamicSize);
+ outShape.resize(3, ShapedType::kDynamic);
if (lhsShape.hasRank()) {
outShape[0] = lhsShape.getDimSize(0);
}
if (rhsShape.hasRank()) {
- outShape[0] = outShape[0] == ShapedType::kDynamicSize
+ outShape[0] = outShape[0] == ShapedType::kDynamic
? rhsShape.getDimSize(0)
: outShape[0];
outShape[2] = rhsShape.getDimSize(2);
return success();
}
- outputShape.resize(paddingShape.getDimSize(0), ShapedType::kDynamicSize);
+ outputShape.resize(paddingShape.getDimSize(0), ShapedType::kDynamic);
inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
return success();
}
DenseIntElementsAttr paddings;
// If the paddings value is not a constant, all dimensions must be dynamic.
if (!matchPattern(operands[1], m_Constant(&paddings))) {
- outputShape.resize(inputShape.getRank(), ShapedType::kDynamicSize);
+ outputShape.resize(inputShape.getRank(), ShapedType::kDynamic);
inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
return success();
}
outputShape.reserve(inputShape.getRank());
for (int i = 0, s = inputShape.getRank(); i < s; i++) {
if (inputShape.isDynamicDim(i)) {
- outputShape.push_back(ShapedType::kDynamicSize);
+ outputShape.push_back(ShapedType::kDynamic);
continue;
}
static SmallVector<int64_t> convertToMlirShape(ArrayRef<int64_t> shape) {
return to_vector(llvm::map_range(shape, [](int64_t dim) {
- return dim == -1 ? ShapedType::kDynamicSize : dim;
+ return dim == -1 ? ShapedType::kDynamic : dim;
}));
}
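TOSA attributes encode unknown extents as -1, while after this change the builtin sentinel is no longer -1 (it moves to an extreme `int64_t` value), so the explicit translation above is load-bearing rather than cosmetic. For example:

// convertToMlirShape({2, -1, 4}) yields {2, ShapedType::kDynamic, 4};
// skipping the translation would smuggle a bogus static extent of -1
// into the inferred type.
SmallVector<int64_t> shape = convertToMlirShape({2, -1, 4});
assert(ShapedType::isDynamic(shape[1]));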
ShapeAdaptor inputShape = operands.getShape(0);
SmallVector<int64_t> outputShape;
if (!inputShape.hasRank()) {
- outputShape.resize(multiples.size(), ShapedType::kDynamicSize);
+ outputShape.resize(multiples.size(), ShapedType::kDynamic);
inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
return success();
}
outputShape.reserve(multiples.size());
for (int i = 0, s = inputShape.getRank(); i < s; i++) {
int64_t dim = inputShape.getDimSize(i);
- if (dim != ShapedType::kDynamicSize)
+ if (dim != ShapedType::kDynamic)
dim *= multipleValues[i];
outputShape.push_back(dim);
}
return success();
}
-
LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
// can determine the output rank.
SmallVector<int64_t> outputShape;
if (!inputShape.hasRank()) {
- outputShape.resize(permsShape.getDimSize(0), ShapedType::kDynamicSize);
+ outputShape.resize(permsShape.getDimSize(0), ShapedType::kDynamic);
inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
return success();
}
return success();
}
- outputShape.resize(inputShape.getRank(), ShapedType::kDynamicSize);
+ outputShape.resize(inputShape.getRank(), ShapedType::kDynamic);
// If the permutations are a constant, we can directly determine the output
// shape.
if (ShapeAdaptor permShape = operands.getValueAsShape(1)) {
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
llvm::SmallVector<int64_t> outputShape;
- outputShape.resize(3, ShapedType::kDynamicSize);
+ outputShape.resize(3, ShapedType::kDynamic);
ShapeAdaptor valuesShape = operands.getShape(0);
if (valuesShape.hasRank()) {
ShapeAdaptor indicesShape = operands.getShape(1);
if (indicesShape.hasRank()) {
- if (outputShape[0] == ShapedType::kDynamicSize)
+ if (outputShape[0] == ShapedType::kDynamic)
outputShape[0] = indicesShape.getDimSize(0);
- if (outputShape[1] == ShapedType::kDynamicSize)
+ if (outputShape[1] == ShapedType::kDynamic)
outputShape[1] = indicesShape.getDimSize(1);
}
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
ResizeOpAdaptor adaptor(operands, attributes);
llvm::SmallVector<int64_t, 4> outputShape;
- outputShape.resize(4, ShapedType::kDynamicSize);
+ outputShape.resize(4, ShapedType::kDynamic);
ShapeAdaptor inputShape = operands.getShape(adaptor.getInput());
if (!inputShape.hasRank())
int64_t inputHeight = inputShape.getDimSize(1);
int64_t inputWidth = inputShape.getDimSize(2);
- if ((inputHeight == ShapedType::kDynamicSize) ||
- (inputWidth == ShapedType::kDynamicSize))
+ if ((inputHeight == ShapedType::kDynamic) ||
+ (inputWidth == ShapedType::kDynamic))
return failure();
llvm::SmallVector<int64_t> scaleInt;
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
llvm::SmallVector<int64_t> outputShape;
- outputShape.resize(3, ShapedType::kDynamicSize);
+ outputShape.resize(3, ShapedType::kDynamic);
ShapeAdaptor valuesInShape = operands.getShape(0);
if (valuesInShape.hasRank()) {
ShapeAdaptor indicesShape = operands.getShape(1);
if (indicesShape.hasRank()) {
- if (outputShape[0] == ShapedType::kDynamicSize)
+ if (outputShape[0] == ShapedType::kDynamic)
outputShape[0] = indicesShape.getDimSize(0);
}
ShapeAdaptor inputShape = operands.getShape(2);
if (inputShape.hasRank()) {
- if (outputShape[0] == ShapedType::kDynamicSize)
+ if (outputShape[0] == ShapedType::kDynamic)
outputShape[0] = inputShape.getDimSize(0);
- if (outputShape[2] == ShapedType::kDynamicSize)
+ if (outputShape[2] == ShapedType::kDynamic)
outputShape[2] = inputShape.getDimSize(2);
}
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
ShapeAdaptor inputShape = operands.getShape(0);
llvm::SmallVector<int64_t> outputShape;
- outputShape.resize(4, ShapedType::kDynamicSize);
+ outputShape.resize(4, ShapedType::kDynamic);
// If the input type is unranked, we only know the output rank, not the sizes.
if (!inputShape) {
MLIRContext *context, ::llvm::Optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
- llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamicSize);
+ llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamic);
Conv2DOp::Adaptor adaptor(operands.getValues(), attributes);
- int64_t inputWidth = ShapedType::kDynamicSize;
- int64_t inputHeight = ShapedType::kDynamicSize;
- int64_t weightWidth = ShapedType::kDynamicSize;
- int64_t weightHeight = ShapedType::kDynamicSize;
+ int64_t inputWidth = ShapedType::kDynamic;
+ int64_t inputHeight = ShapedType::kDynamic;
+ int64_t weightWidth = ShapedType::kDynamic;
+ int64_t weightHeight = ShapedType::kDynamic;
// Input shape describes input width/height and batch.
MLIRContext *context, ::llvm::Optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
- llvm::SmallVector<int64_t> outputShape(5, ShapedType::kDynamicSize);
+ llvm::SmallVector<int64_t> outputShape(5, ShapedType::kDynamic);
Conv3DOp::Adaptor adaptor(operands.getValues(), attributes);
- int64_t inputWidth = ShapedType::kDynamicSize;
- int64_t inputHeight = ShapedType::kDynamicSize;
- int64_t inputDepth = ShapedType::kDynamicSize;
+ int64_t inputWidth = ShapedType::kDynamic;
+ int64_t inputHeight = ShapedType::kDynamic;
+ int64_t inputDepth = ShapedType::kDynamic;
- int64_t weightWidth = ShapedType::kDynamicSize;
- int64_t weightHeight = ShapedType::kDynamicSize;
- int64_t weightDepth = ShapedType::kDynamicSize;
+ int64_t weightWidth = ShapedType::kDynamic;
+ int64_t weightHeight = ShapedType::kDynamic;
+ int64_t weightDepth = ShapedType::kDynamic;
// Input shape describes input width/height and batch.
ShapeAdaptor inputShape = operands.getShape(adaptor.getInput());
MLIRContext *context, ::llvm::Optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
- llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamicSize);
+ llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamic);
DepthwiseConv2DOp::Adaptor adaptor(operands.getValues(), attributes);
- int64_t inputWidth = ShapedType::kDynamicSize;
- int64_t inputHeight = ShapedType::kDynamicSize;
- int64_t inputChannels = ShapedType::kDynamicSize;
+ int64_t inputWidth = ShapedType::kDynamic;
+ int64_t inputHeight = ShapedType::kDynamic;
+ int64_t inputChannels = ShapedType::kDynamic;
- int64_t weightWidth = ShapedType::kDynamicSize;
- int64_t weightHeight = ShapedType::kDynamicSize;
- int64_t depthChannels = ShapedType::kDynamicSize;
+ int64_t weightWidth = ShapedType::kDynamic;
+ int64_t weightHeight = ShapedType::kDynamic;
+ int64_t depthChannels = ShapedType::kDynamic;
// Input shape describes input width/height and batch.
ShapeAdaptor inputShape = operands.getShape(adaptor.getInput());
getI64Values(adaptor.getOutShape(), outputShape);
outputShape = convertToMlirShape(outputShape);
- int64_t inputWidth = ShapedType::kDynamicSize;
- int64_t inputHeight = ShapedType::kDynamicSize;
- int64_t weightWidth = ShapedType::kDynamicSize;
- int64_t weightHeight = ShapedType::kDynamicSize;
+ int64_t inputWidth = ShapedType::kDynamic;
+ int64_t inputHeight = ShapedType::kDynamic;
+ int64_t weightWidth = ShapedType::kDynamic;
+ int64_t weightHeight = ShapedType::kDynamic;
// Input shape describes input width/height and batch.
ShapeAdaptor inputShape = operands.getShape(adaptor.getInput());
// Reshape input to [N,IH,IW,IC] -> [N * IH * IW, IC].
ArrayRef<int64_t> inputShape = inputType.getShape();
- int64_t combined = ShapedType::kDynamicSize;
+ int64_t combined = ShapedType::kDynamic;
if (numDynamic == 0)
combined = inputShape[0] * inputShape[1] * inputShape[2];
llvm::SmallVector<int64_t, 2> revisedInputShape{combined, inputShape[3]};
} else if (*i2 == 1) {
*iR = *i1;
} else {
- *iR = ShapedType::kDynamicSize;
+ *iR = ShapedType::kDynamic;
}
} else {
if (*i1 == *i2 || *i2 == 1) {
break;
int64_t currTargetShape = targetShape[targetDim];
- while (sourceShape[sourceDim] != ShapedType::kDynamicSize &&
-        prodOfCollapsedDims * sourceShape[sourceDim] < currTargetShape &&
-        sourceDim < sourceShape.size()) {
+ // Check that `sourceDim` is in bounds before it is used to index
+ // `sourceShape`.
+ while (sourceDim < sourceShape.size() &&
+        sourceShape[sourceDim] != ShapedType::kDynamic &&
+        prodOfCollapsedDims * sourceShape[sourceDim] < currTargetShape) {
prodOfCollapsedDims *= sourceShape[sourceDim];
// If the current expanded dimension is dynamic, then the collapsed
// dimensions should also be dynamic and the product of all previous unprocessed
// dimensions of the expanded shape should be 1.
- if (sourceShape[sourceDim] == ShapedType::kDynamicSize &&
- (currTargetShape != ShapedType::kDynamicSize ||
+ if (sourceShape[sourceDim] == ShapedType::kDynamic &&
+ (currTargetShape != ShapedType::kDynamic ||
prodOfCollapsedDims != 1))
return llvm::None;
// If the collapsed dim is dynamic, the current expanded dim should also
// be dynamic.
- if (currTargetShape == ShapedType::kDynamicSize &&
- sourceShape[sourceDim] != ShapedType::kDynamicSize)
+ if (currTargetShape == ShapedType::kDynamic &&
+ sourceShape[sourceDim] != ShapedType::kDynamic)
return llvm::None;
// For static shapes, if the product of dimensions of the expanded shape
// Process any remaining entries in the source shape. They all need to be
// 1 or dynamic.
for (; sourceDim < sourceShape.size(); sourceDim++) {
- if (sourceShape[sourceDim] != ShapedType::kDynamicSize &&
+ if (sourceShape[sourceDim] != ShapedType::kDynamic &&
sourceShape[sourceDim] != 1)
return llvm::None;
// The map is empty when the target type is a scalar.
resStrides(bT.getRank(), 0);
for (int64_t idx = 0, e = aT.getRank(); idx < e; ++idx) {
resShape[idx] =
- (aShape[idx] == bShape[idx]) ? aShape[idx] : ShapedType::kDynamicSize;
+ (aShape[idx] == bShape[idx]) ? aShape[idx] : ShapedType::kDynamic;
resStrides[idx] = (aStrides[idx] == bStrides[idx])
? aStrides[idx]
- : ShapedType::kDynamicStrideOrOffset;
+ : ShapedType::kDynamic;
}
resOffset =
- (aOffset == bOffset) ? aOffset : ShapedType::kDynamicStrideOrOffset;
+ (aOffset == bOffset) ? aOffset : ShapedType::kDynamic;
return MemRefType::get(
resShape, aT.getElementType(),
StridedLayoutAttr::get(aT.getContext(), resOffset, resStrides));
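With offsets and strides sharing one sentinel, a memref type with partially unknown strided layout is built uniformly. A sketch, assuming an `MLIRContext *ctx` is in scope:

// Builds memref<4x?xf32, strided<[?, 1], offset: ?>>.
auto f32 = FloatType::getF32(ctx);
auto layout = StridedLayoutAttr::get(
    ctx, /*offset=*/ShapedType::kDynamic,
    /*strides=*/{ShapedType::kDynamic, 1});
auto memTy = MemRefType::get({4, ShapedType::kDynamic}, f32, layout);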
/// Prints a strided layout attribute.
void StridedLayoutAttr::print(llvm::raw_ostream &os) const {
auto printIntOrQuestion = [&](int64_t value) {
- if (value == ShapedType::kDynamicStrideOrOffset)
+ if (ShapedType::isDynamic(value))
os << "?";
else
os << value;
// AffineExpr for offset.
// Static case.
- if (offset != MemRefType::getDynamicStrideOrOffset()) {
+ if (!ShapedType::isDynamic(offset)) {
auto cst = getAffineConstantExpr(offset, context);
expr = cst;
} else {
auto d = getAffineDimExpr(dim, context);
AffineExpr mult;
// Static case.
- if (stride != MemRefType::getDynamicStrideOrOffset())
+ if (!ShapedType::isDynamic(stride))
mult = getAffineConstantExpr(stride, context);
else
// Dynamic case, new symbol for each new stride.
// ShapedType
//===----------------------------------------------------------------------===//
-constexpr int64_t ShapedType::kDynamicSize;
-constexpr int64_t ShapedType::kDynamicStrideOrOffset;
+constexpr int64_t ShapedType::kDynamic;
int64_t ShapedType::getNumElements(ArrayRef<int64_t> shape) {
int64_t num = 1;
if (!BaseMemRefType::isValidElementType(elementType))
return emitError() << "invalid memref element type";
- // Negative sizes are not allowed except for `kDynamicSize`.
+ // Negative sizes are not allowed except for `kDynamic`.
for (int64_t s : shape)
if (s < 0 && !ShapedType::isDynamic(s))
return emitError() << "invalid memref size";
}
/// A stride specification is a list of integer values that are either static
-/// or dynamic (encoded with ShapedType::kDynamicStrideOrOffset). Strides encode
+/// or dynamic (encoded with ShapedType::kDynamic). Strides encode
/// the distance in the number of elements between successive entries along a
/// particular dimension.
///
if (auto cst = offsetExpr.dyn_cast<AffineConstantExpr>())
offset = cst.getValue();
else
- offset = ShapedType::kDynamicStrideOrOffset;
+ offset = ShapedType::kDynamic;
for (auto e : strideExprs) {
if (auto c = e.dyn_cast<AffineConstantExpr>())
strides.push_back(c.getValue());
else
- strides.push_back(ShapedType::kDynamicStrideOrOffset);
+ strides.push_back(ShapedType::kDynamic);
}
return success();
}
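Conversely, `getStridesAndOffset` decomposes a strided layout into its stride list and offset, now reporting every unknown as `kDynamic`. Typical call shape (a sketch; `memTy` is an assumed `MemRefType`):

SmallVector<int64_t> strides;
int64_t offset;
if (failed(getStridesAndOffset(memTy, strides, offset)))
  return failure(); // the layout is not strided at all
bool fullyStatic = !ShapedType::isDynamic(offset) &&
                   llvm::none_of(strides, ShapedType::isDynamic);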
<< op.getMixedSizes().size() << " vs " << op.getMixedStrides().size()
<< ") so the rank of the result type is well-formed.";
- if (failed(verifyListOfOperandsOrIntegers(
- op, "offset", maxRanks[0], op.static_offsets(), op.offsets(),
- ShapedType::isDynamicStrideOrOffset)))
+ if (failed(verifyListOfOperandsOrIntegers(op, "offset", maxRanks[0],
+ op.static_offsets(), op.offsets(),
+ ShapedType::isDynamic)))
return failure();
if (failed(verifyListOfOperandsOrIntegers(op, "size", maxRanks[1],
op.static_sizes(), op.sizes(),
ShapedType::isDynamic)))
return failure();
- if (failed(verifyListOfOperandsOrIntegers(
- op, "stride", maxRanks[2], op.static_strides(), op.strides(),
- ShapedType::isDynamicStrideOrOffset)))
+ if (failed(verifyListOfOperandsOrIntegers(op, "stride", maxRanks[2],
+ op.static_strides(), op.strides(),
+ ShapedType::isDynamic)))
return failure();
return success();
}
SmallVector<OpFoldResult, 4>
mlir::getMixedStridesOrOffsets(ArrayAttr staticValues,
ValueRange dynamicValues) {
- return getMixedValues(staticValues, dynamicValues,
- ShapedType::kDynamicStrideOrOffset);
+ return getMixedValues(staticValues, dynamicValues, ShapedType::kDynamic);
}
SmallVector<OpFoldResult, 4> mlir::getMixedSizes(ArrayAttr staticValues,
ValueRange dynamicValues) {
- return getMixedValues(staticValues, dynamicValues, ShapedType::kDynamicSize);
+ return getMixedValues(staticValues, dynamicValues, ShapedType::kDynamic);
}
std::pair<ArrayAttr, SmallVector<Value>>
std::pair<ArrayAttr, SmallVector<Value>> mlir::decomposeMixedStridesOrOffsets(
OpBuilder &b, const SmallVectorImpl<OpFoldResult> &mixedValues) {
- return decomposeMixedValues(b, mixedValues,
- ShapedType::kDynamicStrideOrOffset);
+ return decomposeMixedValues(b, mixedValues, ShapedType::kDynamic);
}
std::pair<ArrayAttr, SmallVector<Value>>
mlir::decomposeMixedSizes(OpBuilder &b,
const SmallVectorImpl<OpFoldResult> &mixedValues) {
- return decomposeMixedValues(b, mixedValues, ShapedType::kDynamicSize);
+ return decomposeMixedValues(b, mixedValues, ShapedType::kDynamic);
}
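After the unification, the size and stride/offset variants above are identical wrappers over the same two primitives, so a decompose/recompose round-trip no longer cares which sentinel family a value came from. Sketch:

// Decompose mixed OpFoldResults into (static ArrayAttr, dynamic Values),
// then rebuild the mixed list; the two lists should agree element-wise.
auto [staticAttr, dynVals] = decomposeMixedSizes(b, mixedValues);
SmallVector<OpFoldResult, 4> roundTripped =
    getMixedSizes(staticAttr, dynVals);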
return;
}
llvm::outs() << "MemRefType offset: ";
- if (offset == MemRefType::getDynamicStrideOrOffset())
+ if (ShapedType::isDynamic(offset))
llvm::outs() << "?";
else
llvm::outs() << offset;
llvm::outs() << " strides: ";
llvm::interleaveComma(strides, llvm::outs(), [&](int64_t v) {
- if (v == MemRefType::getDynamicStrideOrOffset())
+ if (ShapedType::isDynamic(v))
llvm::outs() << "?";
else
llvm::outs() << v;
return emitOptionalError(location, "only shaped type operands allowed");
}
int64_t dim =
- sval.hasRank() ? sval.getShape().front() : ShapedType::kDynamicSize;
+ sval.hasRank() ? sval.getShape().front() : ShapedType::kDynamic;
auto type = IntegerType::get(context, 17);
inferredReturnShapes.push_back(ShapedTypeComponents({dim}, type));
return success();
TEST(BroadcastShapeTest, InterleavingUnknowns) {
SmallVector<int64_t, 4> result;
- int64_t dyn = mlir::ShapedType::kDynamicSize;
+ int64_t dyn = mlir::ShapedType::kDynamic;
ASSERT_TRUE(getBroadcastedShape({1, 2, dyn, dyn, dyn}, {dyn, dyn, dyn, 4, 1},
result));
EXPECT_THAT(result, ElementsAre(dyn, 2, dyn, 4, dyn));