This differential replaces all uses of SparseTensorEncodingAttr::DimLevelType with DimLevelType. The next differential will break out a separate library for the DimLevelType enum, so that the Dialect code doesn't need to depend on the rest of the runtime.
Depends On D135995
Reviewed By: aartbik
Differential Revision: https://reviews.llvm.org/D135996
#ifndef MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSOR_H_
#define MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSOR_H_
+#include "mlir/ExecutionEngine/SparseTensor/Enums.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/OpDefinition.h"
// Dimension level types.
//
-bool isDenseDim(SparseTensorEncodingAttr::DimLevelType dltp);
-bool isCompressedDim(SparseTensorEncodingAttr::DimLevelType dltp);
-bool isSingletonDim(SparseTensorEncodingAttr::DimLevelType dltp);
-
-/// Convenience method to test for dense dimension (0 <= d < rank).
-bool isDenseDim(RankedTensorType type, uint64_t d);
-
-/// Convenience method to test for compressed dimension (0 <= d < rank).
-bool isCompressedDim(RankedTensorType type, uint64_t d);
-
-/// Convenience method to test for singleton dimension (0 <= d < rank).
-bool isSingletonDim(RankedTensorType type, uint64_t d);
+// Cannot be constexpr, because `getRank` isn't constexpr. However,
+// for some strange reason, the wrapper functions below don't trigger
+// the same [-Winvalid-constexpr] warning (despite this function not
+// being constexpr).
+inline DimLevelType getDimLevelType(RankedTensorType type, uint64_t d) {
+ assert(d < static_cast<uint64_t>(type.getRank()));
+ if (auto enc = getSparseTensorEncoding(type))
+ return enc.getDimLevelType()[d];
+ return DimLevelType::Dense; // unannotated tensor is dense
+}
+
+/// Convenience function to test for dense dimension (0 <= d < rank).
+constexpr bool isDenseDim(RankedTensorType type, uint64_t d) {
+ return isDenseDLT(getDimLevelType(type, d));
+}
+
+/// Convenience function to test for compressed dimension (0 <= d < rank).
+constexpr bool isCompressedDim(RankedTensorType type, uint64_t d) {
+ return isCompressedDLT(getDimLevelType(type, d));
+}
+
+/// Convenience function to test for singleton dimension (0 <= d < rank).
+constexpr bool isSingletonDim(RankedTensorType type, uint64_t d) {
+ return isSingletonDLT(getDimLevelType(type, d));
+}
//
// Dimension level properties.
//
-bool isOrderedDim(SparseTensorEncodingAttr::DimLevelType dltp);
-bool isUniqueDim(SparseTensorEncodingAttr::DimLevelType dltp);
-
-/// Convenience method to test for ordered property in the
+/// Convenience function to test for ordered property in the
/// given dimension (0 <= d < rank).
-bool isOrderedDim(RankedTensorType type, uint64_t d);
+constexpr bool isOrderedDim(RankedTensorType type, uint64_t d) {
+ return isOrderedDLT(getDimLevelType(type, d));
+}
-/// Convenience method to test for unique property in the
+/// Convenience function to test for unique property in the
/// given dimension (0 <= d < rank).
-bool isUniqueDim(RankedTensorType type, uint64_t d);
+constexpr bool isUniqueDim(RankedTensorType type, uint64_t d) {
+ return isUniqueDLT(getDimLevelType(type, d));
+}
//
// Reordering.
ins
// A dimension level type for each dimension of the tensor type.
ArrayRefParameter<
- "SparseTensorEncodingAttr::DimLevelType",
+ "::mlir::sparse_tensor::DimLevelType",
"per dimension level type"
>: $dimLevelType,
// A dimension order on the indices of this tensor type.
let genVerifyDecl = 1;
let hasCustomAssemblyFormat = 1;
-
- let extraClassDeclaration = [{
- // Dimension level types. By default, each type has the unique and
- // ordered properties. Alternatives properties are indicated by
- // Nu (not-unique) and No (not-ordered).
- //
- // TODO: separate type and property in encoding
- //
- enum class DimLevelType : uint8_t {
- Dense = 4, // 0b001_00
- Compressed = 8, // 0b010_00
- CompressedNu = 9, // 0b010_01
- CompressedNo = 10, // 0b010_10
- CompressedNuNo = 11, // 0b010_11
- Singleton = 16, // 0b100_00
- SingletonNu = 17, // 0b100_01
- SingletonNo = 18, // 0b100_10
- SingletonNuNo = 19, // 0b100_11
- };
- }];
}
def IsSparseTensorPred
// Ensure the C-API enums are int-castable to C++ equivalents.
static_assert(
static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_DENSE) ==
- static_cast<int>(SparseTensorEncodingAttr::DimLevelType::Dense) &&
+ static_cast<int>(DimLevelType::Dense) &&
static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED) ==
- static_cast<int>(
- SparseTensorEncodingAttr::DimLevelType::Compressed) &&
+ static_cast<int>(DimLevelType::Compressed) &&
static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU) ==
- static_cast<int>(
- SparseTensorEncodingAttr::DimLevelType::CompressedNu) &&
+ static_cast<int>(DimLevelType::CompressedNu) &&
static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NO) ==
- static_cast<int>(
- SparseTensorEncodingAttr::DimLevelType::CompressedNo) &&
+ static_cast<int>(DimLevelType::CompressedNo) &&
static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU_NO) ==
- static_cast<int>(
- SparseTensorEncodingAttr::DimLevelType::CompressedNuNo) &&
+ static_cast<int>(DimLevelType::CompressedNuNo) &&
static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON) ==
- static_cast<int>(
- SparseTensorEncodingAttr::DimLevelType::Singleton) &&
+ static_cast<int>(DimLevelType::Singleton) &&
static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU) ==
- static_cast<int>(
- SparseTensorEncodingAttr::DimLevelType::SingletonNu) &&
+ static_cast<int>(DimLevelType::SingletonNu) &&
static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NO) ==
- static_cast<int>(
- SparseTensorEncodingAttr::DimLevelType::SingletonNo) &&
+ static_cast<int>(DimLevelType::SingletonNo) &&
static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU_NO) ==
- static_cast<int>(
- SparseTensorEncodingAttr::DimLevelType::SingletonNuNo),
+ static_cast<int>(DimLevelType::SingletonNuNo),
"MlirSparseTensorDimLevelType (C-API) and DimLevelType (C++) mismatch");
bool mlirAttributeIsASparseTensorEncodingAttr(MlirAttribute attr) {
MlirSparseTensorDimLevelType const *dimLevelTypes,
MlirAffineMap dimOrdering, MlirAffineMap higherOrdering,
int pointerBitWidth, int indexBitWidth) {
- SmallVector<SparseTensorEncodingAttr::DimLevelType> cppDimLevelTypes;
+ SmallVector<DimLevelType> cppDimLevelTypes;
cppDimLevelTypes.resize(numDimLevelTypes);
for (intptr_t i = 0; i < numDimLevelTypes; ++i)
- cppDimLevelTypes[i] =
- static_cast<SparseTensorEncodingAttr::DimLevelType>(dimLevelTypes[i]);
+ cppDimLevelTypes[i] = static_cast<DimLevelType>(dimLevelTypes[i]);
return wrap(SparseTensorEncodingAttr::get(
unwrap(ctx), cppDimLevelTypes, unwrap(dimOrdering),
unwrap(higherOrdering), pointerBitWidth, indexBitWidth));
MLIRIR
MLIRInferTypeOpInterface
MLIRSupport
+ MLIRSparseTensorRuntime
)
if (failed(parser.parseGreater()))
return {};
// Process the data from the parsed dictionary value into struct-like data.
- SmallVector<SparseTensorEncodingAttr::DimLevelType, 4> dlt;
+ SmallVector<DimLevelType, 4> dlt;
AffineMap dimOrd = {};
AffineMap higherOrd = {};
unsigned ptr = 0;
}
auto strVal = strAttr.getValue();
if (strVal == "dense") {
- dlt.push_back(SparseTensorEncodingAttr::DimLevelType::Dense);
+ dlt.push_back(DimLevelType::Dense);
} else if (strVal == "compressed") {
- dlt.push_back(SparseTensorEncodingAttr::DimLevelType::Compressed);
+ dlt.push_back(DimLevelType::Compressed);
} else if (strVal == "compressed-nu") {
- dlt.push_back(SparseTensorEncodingAttr::DimLevelType::CompressedNu);
+ dlt.push_back(DimLevelType::CompressedNu);
} else if (strVal == "compressed-no") {
- dlt.push_back(SparseTensorEncodingAttr::DimLevelType::CompressedNo);
+ dlt.push_back(DimLevelType::CompressedNo);
} else if (strVal == "compressed-nu-no") {
- dlt.push_back(SparseTensorEncodingAttr::DimLevelType::CompressedNuNo);
+ dlt.push_back(DimLevelType::CompressedNuNo);
} else if (strVal == "singleton") {
- dlt.push_back(SparseTensorEncodingAttr::DimLevelType::Singleton);
+ dlt.push_back(DimLevelType::Singleton);
} else if (strVal == "singleton-nu") {
- dlt.push_back(SparseTensorEncodingAttr::DimLevelType::SingletonNu);
+ dlt.push_back(DimLevelType::SingletonNu);
} else if (strVal == "singleton-no") {
- dlt.push_back(SparseTensorEncodingAttr::DimLevelType::SingletonNo);
+ dlt.push_back(DimLevelType::SingletonNo);
} else if (strVal == "singleton-nu-no") {
- dlt.push_back(SparseTensorEncodingAttr::DimLevelType::SingletonNuNo);
+ dlt.push_back(DimLevelType::SingletonNuNo);
} else {
parser.emitError(parser.getNameLoc(),
"unexpected dimension level type: ")
return nullptr;
}
-bool mlir::sparse_tensor::isDenseDim(
- SparseTensorEncodingAttr::DimLevelType dltp) {
- return dltp == SparseTensorEncodingAttr::DimLevelType::Dense;
-}
-
-bool mlir::sparse_tensor::isCompressedDim(
- SparseTensorEncodingAttr::DimLevelType dltp) {
- switch (dltp) {
- case SparseTensorEncodingAttr::DimLevelType::Compressed:
- case SparseTensorEncodingAttr::DimLevelType::CompressedNu:
- case SparseTensorEncodingAttr::DimLevelType::CompressedNo:
- case SparseTensorEncodingAttr::DimLevelType::CompressedNuNo:
- return true;
- default:
- return false;
- }
-}
-
-bool mlir::sparse_tensor::isSingletonDim(
- SparseTensorEncodingAttr::DimLevelType dltp) {
- switch (dltp) {
- case SparseTensorEncodingAttr::DimLevelType::Singleton:
- case SparseTensorEncodingAttr::DimLevelType::SingletonNu:
- case SparseTensorEncodingAttr::DimLevelType::SingletonNo:
- case SparseTensorEncodingAttr::DimLevelType::SingletonNuNo:
- return true;
- default:
- return false;
- }
-}
-
-bool mlir::sparse_tensor::isDenseDim(RankedTensorType type, uint64_t d) {
- assert(d < static_cast<uint64_t>(type.getRank()));
- if (auto enc = getSparseTensorEncoding(type))
- return isDenseDim(enc.getDimLevelType()[d]);
- return true; // unannotated tensor is dense
-}
-
-bool mlir::sparse_tensor::isCompressedDim(RankedTensorType type, uint64_t d) {
- assert(d < static_cast<uint64_t>(type.getRank()));
- if (auto enc = getSparseTensorEncoding(type))
- return isCompressedDim(enc.getDimLevelType()[d]);
- return false; // unannotated tensor is dense
-}
-
-bool mlir::sparse_tensor::isSingletonDim(RankedTensorType type, uint64_t d) {
- assert(d < static_cast<uint64_t>(type.getRank()));
- if (auto enc = getSparseTensorEncoding(type))
- return isSingletonDim(enc.getDimLevelType()[d]);
- return false; // unannotated tensor is dense
-}
-
-bool mlir::sparse_tensor::isOrderedDim(
- SparseTensorEncodingAttr::DimLevelType dltp) {
- switch (dltp) {
- case SparseTensorEncodingAttr::DimLevelType::CompressedNo:
- case SparseTensorEncodingAttr::DimLevelType::CompressedNuNo:
- case SparseTensorEncodingAttr::DimLevelType::SingletonNo:
- case SparseTensorEncodingAttr::DimLevelType::SingletonNuNo:
- return false;
- default:
- return true;
- }
-}
-
-bool mlir::sparse_tensor::isUniqueDim(
- SparseTensorEncodingAttr::DimLevelType dltp) {
- switch (dltp) {
- case SparseTensorEncodingAttr::DimLevelType::CompressedNu:
- case SparseTensorEncodingAttr::DimLevelType::CompressedNuNo:
- case SparseTensorEncodingAttr::DimLevelType::SingletonNu:
- case SparseTensorEncodingAttr::DimLevelType::SingletonNuNo:
- return false;
- default:
- return true;
- }
-}
-
-bool mlir::sparse_tensor::isOrderedDim(RankedTensorType type, uint64_t d) {
- assert(d < static_cast<uint64_t>(type.getRank()));
- if (auto enc = getSparseTensorEncoding(type))
- return isOrderedDim(enc.getDimLevelType()[d]);
- return true; // unannotated tensor is dense (and thus ordered)
-}
-
-bool mlir::sparse_tensor::isUniqueDim(RankedTensorType type, uint64_t d) {
- assert(d < static_cast<uint64_t>(type.getRank()));
- if (auto enc = getSparseTensorEncoding(type))
- return isUniqueDim(enc.getDimLevelType()[d]);
- return true; // unannotated tensor is dense (and thus unique)
-}
-
uint64_t mlir::sparse_tensor::toOrigDim(const SparseTensorEncodingAttr &enc,
uint64_t d) {
if (enc) {
for (auto dimTp : enc.getDimLevelType())
dims[i].push_back(dimTp);
else
- dims[i].assign(rank, SparseTensorEncodingAttr::DimLevelType::Dense);
+ dims[i].assign(rank, DimLevelType::Dense);
// Initialize using empty value.
pidxs[i].assign(rank, Value());
assert(!ptrBuffer[t][d] && !idxBuffer[t][d] && !sizes[t][d] &&
!highs[t][d]);
// Handle sparse storage schemes.
- if (isCompressedDim(dims[t][d])) {
+ if (isCompressedDLT(dims[t][d])) {
auto ptrTp =
MemRefType::get(dynShape, getPointerOverheadType(builder, enc));
auto indTp =
// Generate sparse primitives to obtains pointer and indices.
ptrBuffer[t][d] = builder.create<ToPointersOp>(loc, ptrTp, tensor, dim);
idxBuffer[t][d] = builder.create<ToIndicesOp>(loc, indTp, tensor, dim);
- } else if (isSingletonDim(dims[t][d])) {
+ } else if (isSingletonDLT(dims[t][d])) {
// Singleton dimension, fetch indices.
auto indTp =
MemRefType::get(dynShape, getIndexOverheadType(builder, enc));
idxBuffer[t][d] = builder.create<ToIndicesOp>(loc, indTp, tensor, dim);
} else {
// Dense dimension, nothing to fetch.
- assert(isDenseDim(dims[t][d]));
+ assert(isDenseDLT(dims[t][d]));
}
// Find upper bound in current dimension.
assert(!coord[tid][dim]);
Value step = constantIndex(builder, loc, 1);
auto dimType = dims[tid][dim];
- bool isSparse = isCompressedDim(dimType) || isSingletonDim(dimType);
- assert(isDenseDim(dimType) || isCompressedDim(dimType) ||
- isSingletonDim(dimType));
+ bool isSparse = isCompressedDLT(dimType) || isSingletonDLT(dimType);
+ assert(isDenseDLT(dimType) || isCompressedDLT(dimType) ||
+ isSingletonDLT(dimType));
Value lo = isSparse ? pidxs[tid][dim] : constantIndex(builder, loc, 0);
Value hi = highs[tid][dim];
assert(dims[tid].size() > dim);
auto dimType = dims[tid][dim];
- if (isDenseDim(dimType))
+ if (isDenseDLT(dimType))
return false;
// Either the first dimension, or the previous dimension has been set.
assert(dim == 0 || pidxs[tid][dim - 1]);
Value c0 = constantIndex(builder, loc, 0);
Value c1 = constantIndex(builder, loc, 1);
- if (isCompressedDim(dimType)) {
+ if (isCompressedDLT(dimType)) {
Value ptr = ptrBuffer[tid][dim];
Value pLo = dim == 0 ? c0 : pidxs[tid][dim - 1];
highs[tid][dim] = genIndexLoad(builder, loc, ptr, pHi);
return true;
}
- if (isSingletonDim(dimType)) {
+ if (isSingletonDLT(dimType)) {
Value pLo = dim == 0 ? c0 : pidxs[tid][dim - 1];
Value pHi = builder.create<arith::AddIOp>(loc, pLo, c1);
// Reset to null.
pidxs[tid][dim] = Value();
coord[tid][dim] = Value();
- if (!isDenseDim(dims[tid][dim]))
+ if (!isDenseDLT(dims[tid][dim]))
// Dense dimension, high is fixed.
highs[tid][dim] = Value();
}
return primaryTypeFunctionSuffix(primaryTypeEncoding(elemTp));
}
-DimLevelType mlir::sparse_tensor::dimLevelTypeEncoding(
- SparseTensorEncodingAttr::DimLevelType dlt) {
- switch (dlt) {
- case SparseTensorEncodingAttr::DimLevelType::Dense:
- return DimLevelType::Dense;
- case SparseTensorEncodingAttr::DimLevelType::Compressed:
- return DimLevelType::Compressed;
- case SparseTensorEncodingAttr::DimLevelType::CompressedNu:
- return DimLevelType::CompressedNu;
- case SparseTensorEncodingAttr::DimLevelType::CompressedNo:
- return DimLevelType::CompressedNo;
- case SparseTensorEncodingAttr::DimLevelType::CompressedNuNo:
- return DimLevelType::CompressedNuNo;
- case SparseTensorEncodingAttr::DimLevelType::Singleton:
- return DimLevelType::Singleton;
- case SparseTensorEncodingAttr::DimLevelType::SingletonNu:
- return DimLevelType::SingletonNu;
- case SparseTensorEncodingAttr::DimLevelType::SingletonNo:
- return DimLevelType::SingletonNo;
- case SparseTensorEncodingAttr::DimLevelType::SingletonNuNo:
- return DimLevelType::SingletonNuNo;
- }
- llvm_unreachable("Unknown SparseTensorEncodingAttr::DimLevelType");
-}
-
//===----------------------------------------------------------------------===//
// Misc code generators.
//===----------------------------------------------------------------------===//
/// Input (TODO: and output) tensors.
std::vector<Value> tensors;
/// The dim type array for each tensor.
- std::vector<std::vector<SparseTensorEncodingAttr::DimLevelType>> dims;
+ std::vector<std::vector<DimLevelType>> dims;
/// Sparse iteration information (by tensor and dim). These arrays
/// are updated to remain current within the current loop.
std::vector<std::vector<Value>> pidxs;
/// Converts a primary storage type to its function-name suffix.
StringRef primaryTypeFunctionSuffix(Type elemTp);
-/// Converts the IR's dimension level type to its internal type-encoding.
-DimLevelType dimLevelTypeEncoding(SparseTensorEncodingAttr::DimLevelType dlt);
-
//===----------------------------------------------------------------------===//
// Misc code generators and utilities.
//===----------------------------------------------------------------------===//
}
/// Generates a constant of the internal dimension level type encoding.
-inline Value
-constantDimLevelTypeEncoding(OpBuilder &builder, Location loc,
- SparseTensorEncodingAttr::DimLevelType dlt) {
- return constantI8(builder, loc,
- static_cast<uint8_t>(dimLevelTypeEncoding(dlt)));
+inline Value constantDimLevelTypeEncoding(OpBuilder &builder, Location loc,
+ DimLevelType dlt) {
+ return constantI8(builder, loc, static_cast<uint8_t>(dlt));
}
} // namespace sparse_tensor
Location loc, ShapedType stp,
SparseTensorEncodingAttr &enc, Action action,
ValueRange szs, Value ptr = Value()) {
- ArrayRef<SparseTensorEncodingAttr::DimLevelType> dlt = enc.getDimLevelType();
+ ArrayRef<DimLevelType> dlt = enc.getDimLevelType();
unsigned sz = dlt.size();
// Sparsity annotations.
SmallVector<Value, 4> attrs;
/// Determine if the runtime library supports direct conversion to the
/// given target `dimTypes`.
-static bool canUseDirectConversion(
- ArrayRef<SparseTensorEncodingAttr::DimLevelType> dimTypes) {
+static bool canUseDirectConversion(ArrayRef<DimLevelType> dimTypes) {
bool alreadyCompressed = false;
for (uint64_t rank = dimTypes.size(), r = 0; r < rank; r++) {
- const DimLevelType dlt = dimLevelTypeEncoding(dimTypes[r]);
+ const DimLevelType dlt = dimTypes[r];
if (isCompressedDLT(dlt)) {
if (alreadyCompressed)
return false; // Multiple compressed dimensions not yet supported.
// The dimLevelTypes aren't actually used by Action::kToIterator.
encDst = SparseTensorEncodingAttr::get(
op->getContext(),
- SmallVector<SparseTensorEncodingAttr::DimLevelType>(
- rank, SparseTensorEncodingAttr::DimLevelType::Dense),
- AffineMap(), AffineMap(), encSrc.getPointerBitWidth(),
- encSrc.getIndexBitWidth());
+ SmallVector<DimLevelType>(rank, DimLevelType::Dense), AffineMap(),
+ AffineMap(), encSrc.getPointerBitWidth(), encSrc.getIndexBitWidth());
SmallVector<Value, 4> sizes;
SmallVector<Value, 8> params;
sizesFromPtr(rewriter, sizes, loc, encSrc, srcTensorTp, src);
// Helper to detect a sparse tensor type operand.
static bool isSparseTensor(OpOperand *op) {
if (auto enc = getSparseTensorEncoding(op->get().getType())) {
- if (llvm::is_contained(enc.getDimLevelType(),
- SparseTensorEncodingAttr::DimLevelType::Compressed))
+ if (llvm::is_contained(enc.getDimLevelType(), DimLevelType::Compressed))
return true;
}
return false;
static RankedTensorType getUnorderedCOOFromType(RankedTensorType src) {
auto *ctx = src.getContext();
auto rank = src.getRank();
- SmallVector<SparseTensorEncodingAttr::DimLevelType, 4> dims;
+ SmallVector<DimLevelType, 4> dims;
// An unordered and non-unique compressed dim at beginning unless the tensor
// is a 1D tensor.
if (rank > 1)
- dims.push_back(SparseTensorEncodingAttr::DimLevelType::CompressedNuNo);
+ dims.push_back(DimLevelType::CompressedNuNo);
// TODO: it is actually ordered at the level for ordered input.
// Followed by unordered non-unique n-2 singleton levels.
- std::fill_n(std::back_inserter(dims), rank - 2,
- SparseTensorEncodingAttr::DimLevelType::SingletonNuNo);
+ std::fill_n(std::back_inserter(dims), rank - 2, DimLevelType::SingletonNuNo);
// TODO: only if all the inputs (for concatenate) are unique at the last
// level should the COO have a unique level at the end. Ends with an
// unordered unique singleton level.
- dims.push_back(SparseTensorEncodingAttr::DimLevelType::SingletonNo);
+ dims.push_back(DimLevelType::SingletonNo);
SparseTensorEncodingAttr encSrc = getSparseTensorEncoding(src);
// TODO: Maybe pick the bitwidth based on input/output tensors (probably the
// largest one among them) in the original operation instead of using the
unsigned d) {
if (enc) {
switch (enc.getDimLevelType()[d]) {
- case SparseTensorEncodingAttr::DimLevelType::Dense:
+ case DimLevelType::Dense:
return DimLevelFormat(DimLvlType::kDense);
- case SparseTensorEncodingAttr::DimLevelType::Compressed:
+ case DimLevelType::Compressed:
return DimLevelFormat(DimLvlType::kCompressed);
- case SparseTensorEncodingAttr::DimLevelType::CompressedNu:
+ case DimLevelType::CompressedNu:
return DimLevelFormat(DimLvlType::kCompressed, true, false);
- case SparseTensorEncodingAttr::DimLevelType::CompressedNo:
+ case DimLevelType::CompressedNo:
return DimLevelFormat(DimLvlType::kCompressed, false, true);
- case SparseTensorEncodingAttr::DimLevelType::CompressedNuNo:
+ case DimLevelType::CompressedNuNo:
return DimLevelFormat(DimLvlType::kCompressed, false, false);
- case SparseTensorEncodingAttr::DimLevelType::Singleton:
+ case DimLevelType::Singleton:
return DimLevelFormat(DimLvlType::kSingleton);
- case SparseTensorEncodingAttr::DimLevelType::SingletonNu:
+ case DimLevelType::SingletonNu:
return DimLevelFormat(DimLvlType::kSingleton, true, false);
- case SparseTensorEncodingAttr::DimLevelType::SingletonNo:
+ case DimLevelType::SingletonNo:
return DimLevelFormat(DimLvlType::kSingleton, false, true);
- case SparseTensorEncodingAttr::DimLevelType::SingletonNuNo:
+ case DimLevelType::SingletonNuNo:
return DimLevelFormat(DimLvlType::kSingleton, false, false);
}
}
":InferTypeOpInterface",
":SparseTensorAttrDefsIncGen",
":SparseTensorOpsIncGen",
+ ":SparseTensorRuntime",
"//llvm:Support",
],
)