ArrayRef<int64_t> staticOffsets,
ArrayRef<int64_t> staticSizes,
ArrayRef<int64_t> staticStrides);
+ static Type inferResultType(MemRefType sourceMemRefType,
+                             ArrayRef<OpFoldResult> staticOffsets,
+                             ArrayRef<OpFoldResult> staticSizes,
+                             ArrayRef<OpFoldResult> staticStrides);
/// Return the expected rank of each of the `static_offsets`, `static_sizes`
/// and `static_strides` attributes.
ArrayRef<int64_t> staticOffsets,
ArrayRef<int64_t> staticSizes,
ArrayRef<int64_t> staticStrides);
+ static Type inferResultType(RankedTensorType sourceRankedTensorType,
+                             ArrayRef<OpFoldResult> staticOffsets,
+                             ArrayRef<OpFoldResult> staticSizes,
+                             ArrayRef<OpFoldResult> staticStrides);
/// Return the expected rank of each of the `static_offsets`, `static_sizes`
/// and `static_strides` attributes.
sourceMemRefType.getMemorySpace());
}
+Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
+                                ArrayRef<OpFoldResult> leadingStaticOffsets,
+                                ArrayRef<OpFoldResult> leadingStaticSizes,
+                                ArrayRef<OpFoldResult> leadingStaticStrides) {
+  SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
+  SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
+  dispatchIndexOpFoldResults(leadingStaticOffsets, dynamicOffsets,
+                             staticOffsets, ShapedType::kDynamicStrideOrOffset);
+  dispatchIndexOpFoldResults(leadingStaticSizes, dynamicSizes, staticSizes,
+                             ShapedType::kDynamicSize);
+  dispatchIndexOpFoldResults(leadingStaticStrides, dynamicStrides,
+                             staticStrides, ShapedType::kDynamicStrideOrOffset);
+  return SubViewOp::inferResultType(sourceMemRefType, staticOffsets,
+                                    staticSizes, staticStrides)
+      .cast<MemRefType>();
+}
+
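For illustration, a hedged sketch of how a caller might exercise the new OpFoldResult-based overload; the OpBuilder `b`, the rank-2 MemRefType `srcType`, and the index Value `dynSize` are assumed names, not part of this patch, and the snippet relies on the file's usual `using namespace mlir;`:

// Sketch only: constant dimensions travel as index attributes, dynamic ones
// as SSA values, all wrapped in OpFoldResult; `b`, `srcType` and `dynSize`
// are hypothetical names used for illustration.
SmallVector<OpFoldResult> offsets = {b.getIndexAttr(0), b.getIndexAttr(0)};
SmallVector<OpFoldResult> sizes = {b.getIndexAttr(4), OpFoldResult(dynSize)};
SmallVector<OpFoldResult> strides = {b.getIndexAttr(1), b.getIndexAttr(1)};
auto resultType =
    SubViewOp::inferResultType(srcType, offsets, sizes, strides)
        .cast<MemRefType>();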
// Build a SubViewOp with mixed static and dynamic entries and custom result
// type. If the type passed is nullptr, it is inferred.
void mlir::SubViewOp::build(OpBuilder &b, OperationState &result,
sourceRankedTensorType.getElementType());
}
+Type SubTensorOp::inferResultType(RankedTensorType sourceRankedTensorType,
+                                  ArrayRef<OpFoldResult> leadingStaticOffsets,
+                                  ArrayRef<OpFoldResult> leadingStaticSizes,
+                                  ArrayRef<OpFoldResult> leadingStaticStrides) {
+  SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
+  SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
+  dispatchIndexOpFoldResults(leadingStaticOffsets, dynamicOffsets,
+                             staticOffsets, ShapedType::kDynamicStrideOrOffset);
+  dispatchIndexOpFoldResults(leadingStaticSizes, dynamicSizes, staticSizes,
+                             ShapedType::kDynamicSize);
+  dispatchIndexOpFoldResults(leadingStaticStrides, dynamicStrides,
+                             staticStrides, ShapedType::kDynamicStrideOrOffset);
+  return SubTensorOp::inferResultType(sourceRankedTensorType, staticOffsets,
+                                      staticSizes, staticStrides)
+      .cast<RankedTensorType>();
+}
+
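Analogously, a hedged usage sketch for the tensor-side overload; `b`, the rank-1 RankedTensorType `srcType`, and the index Value `len` are hypothetical names introduced only for this example:

// Sketch only: infer the type of a dynamically sized, unit-stride prefix of
// a 1-D tensor; all names here are assumptions for illustration.
SmallVector<OpFoldResult> offsets = {b.getIndexAttr(0)};
SmallVector<OpFoldResult> sizes = {OpFoldResult(len)};
SmallVector<OpFoldResult> strides = {b.getIndexAttr(1)};
auto resultType =
    SubTensorOp::inferResultType(srcType, offsets, sizes, strides)
        .cast<RankedTensorType>();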
// Build a SubTensorOp with mixed static and dynamic entries and custom result
// type. If the type passed is nullptr, it is inferred.
void mlir::SubTensorOp::build(OpBuilder &b, OperationState &result,