>,
InterfaceMethod<
/*desc=*/[{
- Return whether the op has sparse tensor semantics.
- }],
- /*retTy=*/"bool",
- /*methodName=*/"hasSparseSemantics",
- /*args=*/(ins),
- /*methodBody=*/"",
- /*defaultImplementation=*/[{
- return $_op->getAttr(getSparseAttrName()).
- template dyn_cast_or_null<ArrayAttr>() != nullptr;
- }]
- >,
- InterfaceMethod<
- /*desc=*/[{
Return the name registered for this op when lowering to an external
library call.
}],
AffineMapArrayAttr:$indexing_maps,
ArrayAttr:$iterator_types,
OptionalAttr<StrAttr>:$doc,
- OptionalAttr<StrAttr>:$library_call,
- // ArrayAttr of StrArrayAttr:
- OptionalAttr<ArrayAttr>:$sparse);
+ OptionalAttr<StrAttr>:$library_call);
let results = (outs Variadic<AnyRankedTensor>:$result_tensors);
let regions = (region AnyRegion:$region);
let extraClassDeclaration = structuredOpsBaseDecls # [{
    Each element of the list represents an iterator of one of the following
types:
parallel, reduction, window
- - sparse: an optional list with per-dimension sparsity annotations (either
- "D" for dense or "S" for sparse) for each input and output view.
Example:
Defining a #matmul_trait attribute in MLIR can be done as follows:
/// function that implements the structured op.
constexpr StringRef getLibraryCallAttrName() { return "library_call"; }
-/// Attribute name for the ArrayAttr of StrArrayAttr that encodes sparsity.
-constexpr StringRef getSparseAttrName() { return "sparse"; }
-
/// Attribute name for the StrArrayAttr which encodes the value of strides.
constexpr StringRef getStridesAttrName() { return "strides"; }
llvm_unreachable("Unsupported IteratorType");
}
-/// Use to encode a dense or sparse dimension.
-constexpr StringRef getSparseDimName() { return "S"; }
-inline bool isSparseDim(Attribute attr) {
- auto strAttr = attr.dyn_cast_or_null<StringAttr>();
- return strAttr && strAttr.getValue() == getSparseDimName();
-}
-constexpr StringRef getDenseDimName() { return "D"; }
-inline bool isDenseDim(Attribute attr) {
- auto strAttr = attr.dyn_cast_or_null<StringAttr>();
- return strAttr && strAttr.getValue() == getDenseDimName();
-}
-
} // end namespace mlir
#endif // MLIR_UTILS_STRUCTUREDOPSUTILS_H
builder.getAffineMapArrayAttr(indexingMaps),
builder.getStrArrayAttr(iteratorTypes),
doc.empty() ? StringAttr() : builder.getStringAttr(doc),
- libraryCall.empty() ? StringAttr() : builder.getStringAttr(libraryCall),
- ArrayAttr());
+ libraryCall.empty() ? StringAttr()
+ : builder.getStringAttr(libraryCall));
if (!bodyBuild)
return;
builder.getAffineMapArrayAttr(indexingMaps),
builder.getStrArrayAttr(iteratorTypes),
doc.empty() ? StringAttr() : builder.getStringAttr(doc),
- libraryCall.empty() ? StringAttr() : builder.getStringAttr(libraryCall),
- ArrayAttr());
+ libraryCall.empty() ? StringAttr()
+ : builder.getStringAttr(libraryCall));
if (!bodyBuild)
return;
getInputBuffers(), getOutputBuffers());
}
-namespace {
-
-template <typename GenericOpType>
-struct AnnotationsVerifier {
- static LogicalResult verify(GenericOpType op) { return success(); }
-};
-
-template <>
-LogicalResult AnnotationsVerifier<GenericOp>::verify(GenericOp op) {
- ArrayAttr sparseAttr = op.sparseAttr();
- if (!sparseAttr)
- return success();
- // Verify consistency of sparse annotations.
- if (!op.hasTensorSemantics())
- return op.emitOpError("expected sparse annotations on tensors only");
- if (op.getNumOutputs() != 1)
- return op.emitOpError("expected single output tensor");
- unsigned numTensors = op.getNumShapedOperands();
- if (sparseAttr.size() != numTensors)
- return op.emitOpError("expected one sparse annotation for each tensor");
- for (unsigned t = 0; t < numTensors; t++) {
- auto dimAttr = sparseAttr[t].dyn_cast_or_null<ArrayAttr>();
- if (!dimAttr)
- return op.emitOpError("expected sparse annotation array for tensor ")
- << t;
- unsigned rank = op.getShapedType(t).getRank();
- if (dimAttr.size() != rank)
- return op.emitOpError("expected sparse annotation with rank ")
- << rank << " for tensor " << t;
- // Per-dimension annotations for each tensor consist of only "D" or "S".
- for (unsigned d = 0; d < rank; d++) {
- if (isDenseDim(dimAttr[d])) {
- continue;
- } else if (isSparseDim(dimAttr[d])) {
- if (t == numTensors - 1)
- return op.emitOpError("sparse output tensors not supported (yet)");
- continue;
- }
- return op.emitOpError("expected sparse annotation at position ")
- << d << " for tensor " << t;
- }
- }
- return success();
-}
-
-} // namespace
-
template <typename GenericOpType>
static LogicalResult verifyGenericOp(GenericOpType op) {
- if (failed(AnnotationsVerifier<GenericOpType>::verify(op)))
- return failure();
-
return success();
}
/*inputs=*/inputs,
/*outputs=*/outputs, genericOp.indexing_maps(),
genericOp.iterator_types(), genericOp.docAttr(),
- genericOp.library_callAttr(), genericOp.sparseAttr());
+ genericOp.library_callAttr());
// Create a new block in the region of the new Generic Op.
Block *oldBlock = genericOp.getBody();
consumer.getOutputs(), rewriter.getAffineMapArrayAttr(fusedIndexMaps),
consumer.iterator_types(),
/*doc=*/nullptr,
- /*library_call=*/nullptr,
- /*sparse=*/nullptr);
+ /*library_call=*/nullptr);
} else {
fusedOp = rewriter.create<IndexedGenericOp>(
consumer.getLoc(), consumer->getResultTypes(),
consumer.getOutputs(), rewriter.getAffineMapArrayAttr(fusedIndexMaps),
consumer.iterator_types(),
/*doc=*/nullptr,
- /*library_call=*/nullptr,
- /*sparse=*/nullptr);
+ /*library_call=*/nullptr);
}
// Construct an AffineMap from consumer loops to producer loops.
/*outputs=*/output, rewriter.getAffineMapArrayAttr(fusedIndexMaps),
producer.iterator_types(),
/*doc=*/nullptr,
- /*library_call=*/nullptr,
- /*sparse=*/nullptr);
+ /*library_call=*/nullptr);
auto &fusedRegion = fusedOp->getRegion(0);
rewriter.cloneRegionBefore(producer->getRegion(0), fusedRegion,
fusedRegion.begin());
rewriter.getAffineMapArrayAttr(fusedIndexMaps),
linalgOp.iterator_types(),
/*doc=*/nullptr,
- /*library_call=*/nullptr,
- /*sparse=*/nullptr);
+ /*library_call=*/nullptr);
// Map the block argument corresponding to the replaced argument with the
// scalar constant.
for am in AffineMap.compress_unused_symbols(op_config.indexing_maps, Context.current)])
iterator_types_attr = ArrayAttr.get(
[StringAttr.get(s) for s in op_config.iterator_types])
- # TODO: Add support for sparse operands once there is a stable interface.
- sparse_attr = None
return (all_arg_defs, in_arg_defs, out_arg_defs, outs, result_types,
type_mapping, capture_arg_mapping, indexing_maps_attr,
- iterator_types_attr, sparse_attr)
+ iterator_types_attr)
def emit_generic_structured_op(op_config: LinalgStructuredOpConfig,
outs: Sequence[Value] = (),
captures: Sequence[Value] = ()):
all_arg_defs, in_arg_defs, out_arg_defs, outs, result_types, type_mapping, \
- capture_arg_mapping, indexing_maps_attr, iterator_types_attr, sparse_attr = \
+ capture_arg_mapping, indexing_maps_attr, iterator_types_attr = \
prepare_common_structured_op(op_config, *ins, outs = outs,
captures=captures)
indexing_maps=indexing_maps_attr,
iterator_types=iterator_types_attr,
doc=None, # TODO: Make optional.
- library_call=None, # TODO: Make optional.
- sparse=sparse_attr) # TODO: Make optional.
+ library_call=None) # TODO: Make optional.
# Construct the body.
block_arg_names = _get_tensor_def_names(*in_arg_defs, *out_arg_defs)
outs: Sequence[Value] = (),
captures: Sequence[Value] = ()):
all_arg_defs, in_arg_defs, out_arg_defs, outs, result_types, type_mapping, \
- capture_arg_mapping, indexing_maps_attr, iterator_types_attr, sparse_attr = \
+ capture_arg_mapping, indexing_maps_attr, iterator_types_attr = \
prepare_common_structured_op(op_config, *ins, outs = outs,
captures = captures)
def _add_type_mapping(name: str, type: Type, type_mapping: Dict[str, Type]):
if name in type_mapping:
if type_mapping[name] != type:
- raise ValueError(f"Cannot overwrite type mapping {name} = "
- f"{type_mapping[name]} by type {type}")
+ raise ValueError(f"Cannot overwrite type mapping {name} = "
+ f"{type_mapping[name]} by type {type}")
type_mapping[name] = type
def _is_floating_point_type(t: Type) -> bool: