--- /dev/null
+//===- IndexingUtils.h - Indexing utilities supporting Linalg ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_LINALG_UTILS_INDEXINGUTILS_H
+#define MLIR_DIALECT_LINALG_UTILS_INDEXINGUTILS_H
+
+#include "mlir/Dialect/Linalg/IR/Linalg.h"
+#include "mlir/Dialect/SCF/IR/SCF.h"
+#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
+#include "llvm/ADT/StringSet.h"
+#include <optional>
+
+namespace mlir {
+namespace linalg {
+
+/// Create one memref::DimOp or tensor::DimOp depending on the type of `val`.
+/// This is a polymorphic convenience function to abstract away the rank and
+/// concrete type of `val`.
+/// Asserts that `val` is a memref or tensor type.
+Value createOrFoldDimOp(OpBuilder &b, Location loc, Value val, int64_t dim);
+
+/// Create one memref::DimOp or tensor::DimOp depending on the type of `val`,
+/// or return the dimension's size as a static IndexAttr when it is known,
+/// without creating any op. This is a polymorphic convenience function to
+/// abstract away the rank and concrete type of `val`.
+/// Asserts that `val` is a memref or tensor type.
+OpFoldResult createFoldedDimOp(OpBuilder &b, Location loc, Value val,
+ int64_t dim);
+
+/// Build the list of DimOp for the dynamic dimensions of `val`.
+/// Asserts that `val` is a ranked shaped type.
+SmallVector<Value> createDynamicDimensions(OpBuilder &b, Location loc,
+ Value val);
+
+/// Build the list of all dimensions for `val`, mixing static attributes and
+/// dynamic values where appropriate.
+/// Asserts that `val` is a ranked shaped type.
+SmallVector<OpFoldResult> getMixedDimensions(OpBuilder &b, Location loc,
+ Value val);
+
+} // namespace linalg
+} // namespace mlir
+#endif // MLIR_DIALECT_LINALG_UTILS_INDEXINGUTILS_H
#define MLIR_DIALECT_LINALG_UTILS_UTILS_H
#include "mlir/Dialect/Linalg/IR/Linalg.h"
+#include "mlir/Dialect/Linalg/Utils/IndexingUtils.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "llvm/ADT/StringSet.h"
/// Check if iterator type has "reduction" semantics.
bool isReductionIterator(utils::IteratorType iteratorType);
-/// Helper function that creates a memref::DimOp or tensor::DimOp depending on
-/// the type of `source`.
-Value createOrFoldDimOp(OpBuilder &b, Location loc, Value source, int64_t dim);
-OpFoldResult createFoldedDimOp(OpBuilder &b, Location loc, Value source,
- int64_t dim);
-
-/// Given an operation, retrieves the value of each dynamic dimension through
-/// constructing the necessary DimOp operators.
-SmallVector<Value, 4> getDynOperands(Location loc, Value val, OpBuilder &b);
-
/// Create a tensor::PadOp that pads `source` to the size of the statically
/// sized `type` whose static sizes are assumed to be greater than the dynamic
/// `source` size. The padding introduces trailing `pad` values until the
/// transformation).
FailureOr<FusionInfo> fuseProducerOfTensor(OpBuilder &b,
OpOperand &consumerOpOperand);
+
/// Tensor counterpart of `fuseProducerOfBuffer`.
/// This implements the fusion part of the "tileAndFuse on tensors"
/// transformation and thus requires the `consumerOpOperand` to be a
Value firstOperand = operands.front();
auto rankedTensorType = t.cast<RankedTensorType>();
auto staticShape = llvm::to_vector<4>(rankedTensorType.getShape());
- auto dynamicShape = linalg::getDynOperands(loc, firstOperand, b);
+ auto dynamicShape = linalg::createDynamicDimensions(b, loc, firstOperand);
res.push_back(b.create<tensor::EmptyOp>(
loc, staticShape, rankedTensorType.getElementType(), dynamicShape));
add_mlir_dialect_library(MLIRLinalgUtils
Utils.cpp
+ IndexingUtils.cpp
ADDITIONAL_HEADER_DIRS
${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/Linalg
MLIRPass
MLIRTensorUtils
MLIRTransformUtils
- )
+)
--- /dev/null
+//===- IndexingUtils.cpp - Indexing utilities supporting Linalg -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements indexing utilities for the Linalg dialect.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/Linalg/Utils/Utils.h"
+
+#include "mlir/Analysis/SliceAnalysis.h"
+#include "mlir/Dialect/Affine/Analysis/AffineStructures.h"
+#include "mlir/Dialect/Affine/IR/AffineOps.h"
+#include "mlir/Dialect/Affine/IR/AffineValueMap.h"
+#include "mlir/Dialect/Affine/LoopUtils.h"
+#include "mlir/Dialect/Arith/IR/Arith.h"
+#include "mlir/Dialect/Arith/Utils/Utils.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/Dialect/Linalg/IR/Linalg.h"
+#include "mlir/Dialect/MemRef/IR/MemRef.h"
+#include "mlir/Dialect/SCF/IR/SCF.h"
+#include "mlir/Dialect/Tensor/IR/Tensor.h"
+#include "mlir/Dialect/Tensor/Utils/Utils.h"
+#include "mlir/Dialect/Utils/IndexingUtils.h"
+#include "mlir/Dialect/Utils/StaticValueUtils.h"
+#include "mlir/IR/AffineExpr.h"
+#include "mlir/IR/AffineExprVisitor.h"
+#include "mlir/IR/AffineMap.h"
+#include "mlir/IR/Matchers.h"
+#include "mlir/IR/OpImplementation.h"
+#include "mlir/Pass/Pass.h"
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/TypeSwitch.h"
+#include "llvm/Support/Debug.h"
+#include <optional>
+
+#define DEBUG_TYPE "linalg-utils"
+
+namespace mlir {
+namespace linalg {
+// Build (or fold away) a dim op for dimension `dim` of `val`, dispatching on
+// whether `val` is memref-like or tensor-like. createOrFold gives the op's
+// folder a chance to replace the op with a constant immediately.
+Value createOrFoldDimOp(OpBuilder &b, Location loc, Value val, int64_t dim) {
+  if (val.getType().isa<UnrankedMemRefType, MemRefType>())
+    return b.createOrFold<memref::DimOp>(loc, val, dim);
+  if (val.getType().isa<UnrankedTensorType, RankedTensorType>())
+    return b.createOrFold<tensor::DimOp>(loc, val, dim);
+  // Any other type violates the documented precondition.
+  llvm_unreachable("Expected MemRefType or TensorType");
+}
+
+// Return dimension `dim` of `val` as an OpFoldResult: a static IndexAttr when
+// the extent is known, otherwise a materialized DimOp value.
+OpFoldResult createFoldedDimOp(OpBuilder &b, Location loc, Value val,
+                               int64_t dim) {
+  auto shapedType = val.getType().cast<ShapedType>();
+  // Unranked types and dynamic extents require a runtime DimOp.
+  if (!shapedType.hasRank() || shapedType.isDynamicDim(dim))
+    return createOrFoldDimOp(b, loc, val, dim);
+  // Statically-known extent: no op is created.
+  return b.getIndexAttr(shapedType.getDimSize(dim));
+}
+
+// Materialize one DimOp per *dynamic* dimension of `val`, in dimension order.
+// Static dimensions produce no entry, so the result's size equals the number
+// of dynamic extents (matching what e.g. tensor::EmptyOp expects).
+SmallVector<Value> createDynamicDimensions(OpBuilder &b, Location loc,
+                                           Value val) {
+  auto shapedType = val.getType().cast<ShapedType>();
+  assert(shapedType.hasRank() && "`val` must have a static rank");
+  SmallVector<Value> res;
+  // Reserve for the worst case of all dimensions being dynamic.
+  res.reserve(shapedType.getRank());
+  for (const auto &dim : llvm::enumerate(shapedType.getShape())) {
+    if (dim.value() == ShapedType::kDynamic)
+      res.push_back(createOrFoldDimOp(b, loc, val, dim.index()));
+  }
+  return res;
+}
+
+// Return all dimensions of `val` as a mixed static/dynamic list: static
+// extents become attributes, dynamic extents become the DimOp values created
+// by createDynamicDimensions above.
+SmallVector<OpFoldResult> getMixedDimensions(OpBuilder &b, Location loc,
+                                             Value val) {
+  auto shapedType = val.getType().cast<ShapedType>();
+  assert(shapedType.hasRank() && "`val` must have a static rank");
+  // Create values for the dynamic extents only ...
+  SmallVector<Value> dynamicDims = createDynamicDimensions(b, loc, val);
+  // ... then interleave them with the static extents as attributes.
+  return getMixedValues(shapedType.getShape(), dynamicDims, b);
+}
+} // namespace linalg
+} // namespace mlir
return iteratorType == utils::IteratorType::reduction;
}
-/// Helper function that creates a memref::DimOp or tensor::DimOp depending on
-/// the type of `source`.
-Value createOrFoldDimOp(OpBuilder &b, Location loc, Value source, int64_t dim) {
- if (source.getType().isa<UnrankedMemRefType, MemRefType>())
- return b.createOrFold<memref::DimOp>(loc, source, dim);
- if (source.getType().isa<UnrankedTensorType, RankedTensorType>())
- return b.createOrFold<tensor::DimOp>(loc, source, dim);
- llvm_unreachable("Expected MemRefType or TensorType");
-}
-
-OpFoldResult createFoldedDimOp(OpBuilder &b, Location loc, Value source,
- int64_t dim) {
- auto shapedType = source.getType().cast<ShapedType>();
- if (!shapedType.hasRank() || shapedType.isDynamicDim(dim))
- return createOrFoldDimOp(b, loc, source, dim);
- return b.getIndexAttr(shapedType.getDimSize(dim));
-}
-
-/// Given an operation, retrieves the value of each dynamic dimension through
-/// constructing the necessary DimOp operators.
-SmallVector<Value, 4> getDynOperands(Location loc, Value val, OpBuilder &b) {
- SmallVector<Value, 4> dynOperands;
- auto shapedType = val.getType().cast<ShapedType>();
- for (const auto &dim : llvm::enumerate(shapedType.getShape())) {
- if (dim.value() == ShapedType::kDynamic)
- dynOperands.push_back(createOrFoldDimOp(b, loc, val, dim.index()));
- }
- return dynOperands;
-}
-
Value makeComposedPadHighOp(OpBuilder &b, Location loc, RankedTensorType type,
Value source, Value pad, bool nofold) {
// Exit if `source` is not defined by an ExtractSliceOp.