From bf812ea484b71ec41d6811646d89876499956235 Mon Sep 17 00:00:00 2001
From: Aart Bik
Date: Mon, 10 May 2021 12:56:15 -0700
Subject: [PATCH] [mlir][linalg] remove the -now- obsolete sparse support in
 linalg

All glue and clutter in the linalg ops has been replaced by proper
sparse tensor type encoding. This code is no longer needed. Thanks to
ntv@ for giving us a temporary home in linalg. So long, and thanks for
all the fish.

Reviewed By: bixia

Differential Revision: https://reviews.llvm.org/D102098
---
 .../mlir/Dialect/Linalg/IR/LinalgInterfaces.td     | 13 -----
 .../mlir/Dialect/Linalg/IR/LinalgStructuredOps.td  |  6 +--
 .../mlir/Dialect/Utils/StructuredOpsUtils.h        | 15 ------
 mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp           | 58 ++--
 mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp   |  2 +-
 .../Dialect/Linalg/Transforms/FusionOnTensors.cpp  | 12 ++---
 .../mlir/dialects/linalg/opdsl/lang/emitter.py     | 15 +++---
 7 files changed, 16 insertions(+), 105 deletions(-)

diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
index 0512b35..80bd377 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
@@ -1002,19 +1002,6 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
     >,
     InterfaceMethod<
       /*desc=*/[{
-        Return whether the op has sparse tensor semantics.
-      }],
-      /*retTy=*/"bool",
-      /*methodName=*/"hasSparseSemantics",
-      /*args=*/(ins),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        return $_op->getAttr(getSparseAttrName()).
-          template dyn_cast_or_null<ArrayAttr>() != nullptr;
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/[{
         Return the name registered for this op when lowering to an
         external library call.
       }],
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
index 79b7a7d..8336668 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
@@ -527,9 +527,7 @@ class GenericOpBase : LinalgStructuredBase_Op
                        OptionalAttr<StrAttr>:$doc,
-                       OptionalAttr<StrAttr>:$library_call,
-                       // ArrayAttr of StrArrayAttr:
-                       OptionalAttr<ArrayAttr>:$sparse);
+                       OptionalAttr<StrAttr>:$library_call);
   let results = (outs Variadic<AnyRankedTensor>:$result_tensors);
   let regions = (region AnyRegion:$region);
   let extraClassDeclaration = structuredOpsBaseDecls # [{
@@ -583,8 +581,6 @@ def GenericOp : GenericOpBase<"generic"> {
       Each element of the list represents and iterator of one of the
      following types:
        parallel, reduction, window
-      - sparse: an optional list with per-dimension sparsity annotations (either
-        "D" for dense or "S" for sparse) for each input and output view.

     Example: Defining a #matmul_trait attribute in MLIR can be done as follows:
diff --git a/mlir/include/mlir/Dialect/Utils/StructuredOpsUtils.h b/mlir/include/mlir/Dialect/Utils/StructuredOpsUtils.h
index c7d2476..d6ccea1 100644
--- a/mlir/include/mlir/Dialect/Utils/StructuredOpsUtils.h
+++ b/mlir/include/mlir/Dialect/Utils/StructuredOpsUtils.h
@@ -58,9 +58,6 @@ constexpr StringRef getDocAttrName() { return "doc"; }
 /// function that implements the structured op.
 constexpr StringRef getLibraryCallAttrName() { return "library_call"; }

-/// Attribute name for the ArrayAttr of StrArrayAttr that encodes sparsity.
-constexpr StringRef getSparseAttrName() { return "sparse"; }
-
 /// Attribute name for the StrArrayAttr which encodes the value of strides.
 constexpr StringRef getStridesAttrName() { return "strides"; }
@@ -129,18 +126,6 @@ inline StringRef toString(IteratorType t) {
   llvm_unreachable("Unsupported IteratorType");
 }

-/// Use to encode a dense or sparse dimension.
-constexpr StringRef getSparseDimName() { return "S"; }
-inline bool isSparseDim(Attribute attr) {
-  auto strAttr = attr.dyn_cast_or_null<StringAttr>();
-  return strAttr && strAttr.getValue() == getSparseDimName();
-}
-constexpr StringRef getDenseDimName() { return "D"; }
-inline bool isDenseDim(Attribute attr) {
-  auto strAttr = attr.dyn_cast_or_null<StringAttr>();
-  return strAttr && strAttr.getValue() == getDenseDimName();
-}
-
 } // end namespace mlir

 #endif // MLIR_UTILS_STRUCTUREDOPSUTILS_H
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 01c240f..5f635c7 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -447,8 +447,8 @@ void GenericOp::build(
       builder.getAffineMapArrayAttr(indexingMaps),
       builder.getStrArrayAttr(iteratorTypes),
       doc.empty() ? StringAttr() : builder.getStringAttr(doc),
-      libraryCall.empty() ? StringAttr() : builder.getStringAttr(libraryCall),
-      ArrayAttr());
+      libraryCall.empty() ? StringAttr()
+                          : builder.getStringAttr(libraryCall));
   if (!bodyBuild)
     return;
@@ -502,8 +502,8 @@ void IndexedGenericOp::build(
       builder.getAffineMapArrayAttr(indexingMaps),
       builder.getStrArrayAttr(iteratorTypes),
       doc.empty() ? StringAttr() : builder.getStringAttr(doc),
-      libraryCall.empty() ? StringAttr() : builder.getStringAttr(libraryCall),
-      ArrayAttr());
+      libraryCall.empty() ? StringAttr()
+                          : builder.getStringAttr(libraryCall));
   if (!bodyBuild)
     return;
@@ -676,58 +676,8 @@ void IndexedGenericOp::getEffects(
                         getInputBuffers(), getOutputBuffers());
 }

-namespace {
-
-template <typename GenericOpType>
-struct AnnotationsVerifier {
-  static LogicalResult verify(GenericOpType op) { return success(); }
-};
-
-template <>
-LogicalResult AnnotationsVerifier<GenericOp>::verify(GenericOp op) {
-  ArrayAttr sparseAttr = op.sparseAttr();
-  if (!sparseAttr)
-    return success();
-  // Verify consistency of sparse annotations.
-  if (!op.hasTensorSemantics())
-    return op.emitOpError("expected sparse annotations on tensors only");
-  if (op.getNumOutputs() != 1)
-    return op.emitOpError("expected single output tensor");
-  unsigned numTensors = op.getNumShapedOperands();
-  if (sparseAttr.size() != numTensors)
-    return op.emitOpError("expected one sparse annotation for each tensor");
-  for (unsigned t = 0; t < numTensors; t++) {
-    auto dimAttr = sparseAttr[t].dyn_cast_or_null<ArrayAttr>();
-    if (!dimAttr)
-      return op.emitOpError("expected sparse annotation array for tensor ")
-             << t;
-    unsigned rank = op.getShapedType(t).getRank();
-    if (dimAttr.size() != rank)
-      return op.emitOpError("expected sparse annotation with rank ")
-             << rank << " for tensor " << t;
-    // Per-dimension annotations for each tensor consist of only "D" or "S".
-    for (unsigned d = 0; d < rank; d++) {
-      if (isDenseDim(dimAttr[d])) {
-        continue;
-      } else if (isSparseDim(dimAttr[d])) {
-        if (t == numTensors - 1)
-          return op.emitOpError("sparse output tensors not supported (yet)");
-        continue;
-      }
-      return op.emitOpError("expected sparse annotation at position ")
-             << d << " for tensor " << t;
-    }
-  }
-  return success();
-}
-
-} // namespace
-
 template <typename GenericOpType>
 static LogicalResult verifyGenericOp(GenericOpType op) {
-  if (failed(AnnotationsVerifier<GenericOpType>::verify(op)))
-    return failure();
-
   return success();
 }
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp
index bd2fdac..ea25013 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp
@@ -88,7 +88,7 @@ finalizeBufferAllocationForGenericOp(ConversionPatternRewriter &rewriter,
       /*inputs=*/inputs,
       /*outputs=*/outputs,
       genericOp.indexing_maps(), genericOp.iterator_types(), genericOp.docAttr(),
-      genericOp.library_callAttr(), genericOp.sparseAttr());
+      genericOp.library_callAttr());

   // Create a new block in the region of the new Generic Op.
   Block *oldBlock = genericOp.getBody();
diff --git a/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp b/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
index 4ee534d..6a0c597 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
@@ -321,8 +321,7 @@ fuseElementwiseOpsImpl(LinalgOp producer, OpOperand &consumerOpOperand,
         consumer.getOutputs(), rewriter.getAffineMapArrayAttr(fusedIndexMaps),
         consumer.iterator_types(),
         /*doc=*/nullptr,
-        /*library_call=*/nullptr,
-        /*sparse=*/nullptr);
+        /*library_call=*/nullptr);
   } else {
     fusedOp = rewriter.create<IndexedGenericOp>(
         consumer.getLoc(), consumer->getResultTypes(),
@@ -331,8 +330,7 @@ fuseElementwiseOpsImpl(LinalgOp producer, OpOperand &consumerOpOperand,
         consumer.getOutputs(), rewriter.getAffineMapArrayAttr(fusedIndexMaps),
         consumer.iterator_types(),
         /*doc=*/nullptr,
-        /*library_call=*/nullptr,
-        /*sparse=*/nullptr);
+        /*library_call=*/nullptr);
   }

   // Construct an AffineMap from consumer loops to producer loops.
@@ -1260,8 +1258,7 @@ struct FoldConsumerReshapeOpByLinearization
         /*outputs=*/output,
         rewriter.getAffineMapArrayAttr(fusedIndexMaps),
         producer.iterator_types(),
         /*doc=*/nullptr,
-        /*library_call=*/nullptr,
-        /*sparse=*/nullptr);
+        /*library_call=*/nullptr);
     auto &fusedRegion = fusedOp->getRegion(0);
     rewriter.cloneRegionBefore(producer->getRegion(0), fusedRegion,
                                fusedRegion.begin());
@@ -1352,8 +1349,7 @@ public:
         rewriter.getAffineMapArrayAttr(fusedIndexMaps),
         linalgOp.iterator_types(),
         /*doc=*/nullptr,
-        /*library_call=*/nullptr,
-        /*sparse=*/nullptr);
+        /*library_call=*/nullptr);

     // Map the block argument corresponding to the replaced argument with the
     // scalar constant.
diff --git a/mlir/python/mlir/dialects/linalg/opdsl/lang/emitter.py b/mlir/python/mlir/dialects/linalg/opdsl/lang/emitter.py
index 4a03702..85c77d5 100644
--- a/mlir/python/mlir/dialects/linalg/opdsl/lang/emitter.py
+++ b/mlir/python/mlir/dialects/linalg/opdsl/lang/emitter.py
@@ -89,12 +89,10 @@ def prepare_common_structured_op(op_config: LinalgStructuredOpConfig,
        for am in AffineMap.compress_unused_symbols(op_config.indexing_maps,
                                                    Context.current)])
   iterator_types_attr = ArrayAttr.get(
       [StringAttr.get(s) for s in op_config.iterator_types])
-  # TODO: Add support for sparse operands once there is a stable interface.
-  sparse_attr = None
   return (all_arg_defs, in_arg_defs, out_arg_defs, outs, result_types,
           type_mapping, capture_arg_mapping, indexing_maps_attr,
-          iterator_types_attr, sparse_attr)
+          iterator_types_attr)


 def emit_generic_structured_op(op_config: LinalgStructuredOpConfig,
@@ -102,7 +100,7 @@ def emit_generic_structured_op(op_config: LinalgStructuredOpConfig,
                                outs: Sequence[Value] = (),
                                captures: Sequence[Value] = ()):
   all_arg_defs, in_arg_defs, out_arg_defs, outs, result_types, type_mapping, \
-  capture_arg_mapping, indexing_maps_attr, iterator_types_attr, sparse_attr = \
+  capture_arg_mapping, indexing_maps_attr, iterator_types_attr = \
       prepare_common_structured_op(op_config, *ins, outs = outs,
                                    captures=captures)
@@ -113,8 +111,7 @@ def emit_generic_structured_op(op_config: LinalgStructuredOpConfig,
       indexing_maps=indexing_maps_attr,
       iterator_types=iterator_types_attr,
       doc=None,  # TODO: Make optional.
-      library_call=None,  # TODO: Make optional.
-      sparse=sparse_attr)  # TODO: Make optional.
+      library_call=None)  # TODO: Make optional.

   # Construct the body.
   block_arg_names = _get_tensor_def_names(*in_arg_defs, *out_arg_defs)
@@ -141,7 +138,7 @@ def emit_named_structured_op(op_config: LinalgStructuredOpConfig,
                              outs: Sequence[Value] = (),
                              captures: Sequence[Value] = ()):
   all_arg_defs, in_arg_defs, out_arg_defs, outs, result_types, type_mapping, \
-  capture_arg_mapping, indexing_maps_attr, iterator_types_attr, sparse_attr = \
+  capture_arg_mapping, indexing_maps_attr, iterator_types_attr = \
       prepare_common_structured_op(op_config, *ins, outs = outs,
                                    captures = captures)
@@ -351,8 +348,8 @@ def _get_tensor_def_names(
 def _add_type_mapping(name: str, type: Type, type_mapping: Dict[str, Type]):
   if name in type_mapping:
     if type_mapping[name] != type:
-      raise ValueError(f"Cannot overwrite type mapping {name} = "
-        f"{type_mapping[name]} by type {type}")
+      raise ValueError(f"Cannot overwrite type mapping {name} = "
+                       f"{type_mapping[name]} by type {type}")
   type_mapping[name] = type

 def _is_floating_point_type(t: Type) -> bool:
-- 
2.7.4
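
For context on the commit message: the per-dimension "D"/"S" annotations deleted by this
patch are, after the change, expressed on the tensor type itself rather than as a `sparse`
attribute on the op. A minimal, illustrative sketch of that replacement style follows; the
alias names (#CSR, #trait), the function name, the shapes, and the identity kernel are made
up for illustration and are not part of this patch, and the exact encoding fields have
evolved since this era of MLIR:

    // Sparsity now lives in the tensor type via an encoding attribute.
    #CSR = #sparse_tensor.encoding<{
      dimLevelType = [ "dense", "compressed" ]  // outer dim dense, inner dim compressed
    }>

    #trait = {
      indexing_maps = [
        affine_map<(i, j) -> (i, j)>,  // input A
        affine_map<(i, j) -> (i, j)>   // output X
      ],
      iterator_types = ["parallel", "parallel"]
    }

    // The linalg.generic op carries no sparse attribute anymore; it simply
    // operates on an operand whose tensor type carries the #CSR encoding.
    func @densify(%arga: tensor<64x64xf64, #CSR>,
                  %argx: tensor<64x64xf64>) -> tensor<64x64xf64> {
      %0 = linalg.generic #trait
          ins(%arga : tensor<64x64xf64, #CSR>)
          outs(%argx : tensor<64x64xf64>) {
        ^bb0(%a: f64, %x: f64):
          linalg.yield %a : f64
      } -> tensor<64x64xf64>
      return %0 : tensor<64x64xf64>
    }

Because sparsity is a property of the type rather than of individual ops, any op or
transformation that forwards tensor types picks it up automatically, which is what makes
the op-level glue removed by this patch unnecessary.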