namespace mlir {
namespace sparse_tensor {
+// TODO: benchmark whether to keep various methods inline vs moving them
+// off to the cpp file.
+
+// TODO: consider distinguishing separate classes for before vs
+// after reading the header; so as to statically avoid the need
+// to `assert(isValid())`.
+
/// This class abstracts over the information stored in file headers,
/// as well as providing the buffers and methods for parsing those headers.
class SparseTensorFile final {
kUndefined = 5
};
- explicit SparseTensorFile(char *filename) : filename(filename) {
+ explicit SparseTensorFile(const char *filename) : filename(filename) {
assert(filename && "Received nullptr for filename");
}
/// Safely gets the size of the given dimension. Is only valid
/// after parsing the header.
uint64_t getDimSize(uint64_t d) const {
- assert(d < getRank());
+ assert(d < getRank() && "Dimension out of bounds");
return idata[2 + d];
}
void readExtFROSTTHeader();
static constexpr int kColWidth = 1025;
- const char *filename;
+ const char *const filename;
FILE *file = nullptr;
ValueKind valueKind_ = ValueKind::kInvalid;
bool isSymmetric_ = false;
/// sparse tensor in coordinate scheme.
template <typename V>
inline SparseTensorCOO<V> *
-openSparseTensorCOO(char *filename, uint64_t rank, const uint64_t *shape,
+openSparseTensorCOO(const char *filename, uint64_t rank, const uint64_t *shape,
const uint64_t *perm, PrimaryType valTp) {
SparseTensorFile stfile(filename);
stfile.openFile();
perm, nnz);
// Read all nonzero elements.
std::vector<uint64_t> indices(rank);
- for (uint64_t k = 0; k < nnz; k++) {
+ for (uint64_t k = 0; k < nnz; ++k) {
char *linePtr = stfile.readLine();
- for (uint64_t r = 0; r < rank; r++) {
+ for (uint64_t r = 0; r < rank; ++r) {
+ // Parse the 1-based index.
uint64_t idx = strtoul(linePtr, &linePtr, 10);
- // Add 0-based index.
+ // Add the 0-based index.
indices[perm[r]] = idx - 1;
}
detail::readCOOValue(coo, indices, &linePtr, stfile.isPattern(),
return coo;
}
-/// Writes the sparse tensor to `dest` in extended FROSTT format.
+/// Writes the sparse tensor to `filename` in extended FROSTT format.
template <typename V>
-inline void outSparseTensor(void *tensor, void *dest, bool sort) {
- assert(tensor && dest);
- auto coo = static_cast<SparseTensorCOO<V> *>(tensor);
- if (sort)
- coo->sort();
- char *filename = static_cast<char *>(dest);
- auto &dimSizes = coo->getDimSizes();
- auto &elements = coo->getElements();
- uint64_t rank = coo->getRank();
- uint64_t nnz = elements.size();
+inline void writeExtFROSTT(const SparseTensorCOO<V> &coo,
+ const char *filename) {
+ assert(filename && "Got nullptr for filename");
+ auto &dimSizes = coo.getDimSizes();
+ auto &elements = coo.getElements();
+ const uint64_t rank = coo.getRank();
+ const uint64_t nnz = elements.size();
std::fstream file;
file.open(filename, std::ios_base::out | std::ios_base::trunc);
assert(file.is_open());
file << "; extended FROSTT format\n" << rank << " " << nnz << std::endl;
- for (uint64_t r = 0; r < rank - 1; r++)
+ for (uint64_t r = 0; r < rank - 1; ++r)
file << dimSizes[r] << " ";
file << dimSizes[rank - 1] << std::endl;
- for (uint64_t i = 0; i < nnz; i++) {
+ for (uint64_t i = 0; i < nnz; ++i) {
auto &idx = elements[i].indices;
- for (uint64_t r = 0; r < rank; r++)
+ for (uint64_t r = 0; r < rank; ++r)
file << (idx[r] + 1) << " ";
file << elements[i].value << std::endl;
}
namespace mlir {
namespace sparse_tensor {
+//===----------------------------------------------------------------------===//
// This forward decl is sufficient to split `SparseTensorStorageBase` into
// its own header, but isn't sufficient for `SparseTensorStorage` to join it.
template <typename V>
class SparseTensorEnumeratorBase;
+// These macros ensure consistent error messages, without risk of incurring
+// an additional method call to do so.
+#define ASSERT_VALID_DIM(d) \
+ assert(d < getRank() && "Dimension index is out of bounds");
+#define ASSERT_COMPRESSED_DIM(d) \
+ assert(isCompressedDim(d) && "Dimension is not compressed");
+#define ASSERT_DENSE_DIM(d) assert(isDenseDim(d) && "Dimension is not dense");
+
/// Abstract base class for `SparseTensorStorage<P,I,V>`. This class
/// takes responsibility for all the `<P,I,V>`-independent aspects
/// of the tensor (e.g., shape, sparsity, permutation). In addition,
/// Safely lookup the size of the given (storage-order) dimension.
uint64_t getDimSize(uint64_t d) const {
- assert(d < getRank());
+ ASSERT_VALID_DIM(d);
return dimSizes[d];
}
/// Safely check if the (storage-order) dimension uses dense storage.
bool isDenseDim(uint64_t d) const {
- assert(d < getRank());
+ ASSERT_VALID_DIM(d);
return dimTypes[d] == DimLevelType::kDense;
}
/// Safely check if the (storage-order) dimension uses compressed storage.
bool isCompressedDim(uint64_t d) const {
- assert(d < getRank());
+ ASSERT_VALID_DIM(d);
switch (dimTypes[d]) {
case DimLevelType::kCompressed:
case DimLevelType::kCompressedNu:
/// Safely check if the (storage-order) dimension uses singleton storage.
bool isSingletonDim(uint64_t d) const {
- assert(d < getRank());
+ ASSERT_VALID_DIM(d);
switch (dimTypes[d]) {
case DimLevelType::kSingleton:
case DimLevelType::kSingletonNu:
/// Safely check if the (storage-order) dimension is ordered.
bool isOrderedDim(uint64_t d) const {
- assert(d < getRank());
+ ASSERT_VALID_DIM(d);
switch (dimTypes[d]) {
case DimLevelType::kCompressedNo:
case DimLevelType::kCompressedNuNo:
/// Safely check if the (storage-order) dimension is unique.
bool isUniqueDim(uint64_t d) const {
- assert(d < getRank());
+ ASSERT_VALID_DIM(d);
switch (dimTypes[d]) {
case DimLevelType::kCompressedNu:
case DimLevelType::kCompressedNuNo:
private:
const std::vector<uint64_t> dimSizes;
- std::vector<uint64_t> rev;
+ std::vector<uint64_t> rev; // conceptually `const`
const std::vector<DimLevelType> dimTypes;
};
+//===----------------------------------------------------------------------===//
// This forward decl is necessary for defining `SparseTensorStorage`,
// but isn't sufficient for splitting it off.
template <typename P, typename I, typename V>
/// Partially specialize these getter methods based on template types.
void getPointers(std::vector<P> **out, uint64_t d) final {
- assert(d < getRank());
+ ASSERT_VALID_DIM(d);
*out = &pointers[d];
}
void getIndices(std::vector<I> **out, uint64_t d) final {
- assert(d < getRank());
+ ASSERT_VALID_DIM(d);
*out = &indices[d];
}
void getValues(std::vector<V> **out) final { *out = &values; }
// Restore insertion path for first insert.
const uint64_t lastDim = getRank() - 1;
uint64_t index = added[0];
+ assert(filled[index] && "added index is not filled");
cursor[lastDim] = index;
lexInsert(cursor, values[index]);
- assert(filled[index]);
values[index] = 0;
filled[index] = false;
// Subsequent insertions are quick.
- for (uint64_t i = 1; i < count; i++) {
+ for (uint64_t i = 1; i < count; ++i) {
assert(index < added[i] && "non-lexicographic insertion");
index = added[i];
+ assert(filled[index] && "added index is not filled");
cursor[lastDim] = index;
insPath(cursor, lastDim, added[i - 1] + 1, values[index]);
- assert(filled[index]);
values[index] = 0;
filled[index] = false;
}
/// does not check that `pos` is semantically valid (i.e., larger than
/// the previous position and smaller than `indices[d].capacity()`).
void appendPointer(uint64_t d, uint64_t pos, uint64_t count = 1) {
- assert(isCompressedDim(d));
+ ASSERT_COMPRESSED_DIM(d);
assert(pos <= std::numeric_limits<P>::max() &&
"Pointer value is too large for the P-type");
pointers[d].insert(pointers[d].end(), count, static_cast<P>(pos));
"Index value is too large for the I-type");
indices[d].push_back(static_cast<I>(i));
} else { // Dense dimension.
- assert(isDenseDim(d));
+ ASSERT_DENSE_DIM(d);
assert(i >= full && "Index was already filled");
if (i == full)
return; // Short-circuit, since it'll be a nop.
/// does not check that `i` is semantically valid (i.e., in bounds
/// for `dimSizes[d]` and not elsewhere occurring in the same segment).
void writeIndex(uint64_t d, uint64_t pos, uint64_t i) {
- assert(isCompressedDim(d));
+ ASSERT_COMPRESSED_DIM(d);
// Subscript assignment to `std::vector` requires that the `pos`-th
// entry has been initialized; thus we must be sure to check `size()`
// here, instead of `capacity()` as would be ideal.
/// and pointwise less-than).
void fromCOO(const std::vector<Element<V>> &elements, uint64_t lo,
uint64_t hi, uint64_t d) {
- uint64_t rank = getRank();
+ const uint64_t rank = getRank();
assert(d <= rank && hi <= elements.size());
// Once dimensions are exhausted, insert the numerical values.
if (d == rank) {
uint64_t full = 0;
while (lo < hi) { // If `hi` is unchanged, then `lo < elements.size()`.
// Find segment in interval with same index elements in this dimension.
- uint64_t i = elements[lo].indices[d];
+ const uint64_t i = elements[lo].indices[d];
uint64_t seg = lo + 1;
- bool merge = isUniqueDim(d);
- while (merge && seg < hi && elements[seg].indices[d] == i)
- seg++;
+ if (isUniqueDim(d))
+ while (seg < hi && elements[seg].indices[d] == i)
+ ++seg;
// Handle segment in interval for sparse or dense dimension.
appendIndex(d, full, i);
full = i + 1;
} else if (isSingletonDim(d)) {
return;
} else { // Dense dimension.
- assert(isDenseDim(d));
+ ASSERT_DENSE_DIM(d);
const uint64_t sz = getDimSizes()[d];
assert(sz >= full && "Segment is overfull");
count = detail::checkedMul(count, sz - full);
/// Wraps up a single insertion path, inner to outer.
void endPath(uint64_t diff) {
- uint64_t rank = getRank();
- assert(diff <= rank);
- for (uint64_t i = 0; i < rank - diff; i++) {
+ const uint64_t rank = getRank();
+ assert(diff <= rank && "Dimension-diff is out of bounds");
+ for (uint64_t i = 0; i < rank - diff; ++i) {
const uint64_t d = rank - i - 1;
finalizeSegment(d, idx[d] + 1);
}
/// Continues a single insertion path, outer to inner.
void insPath(const uint64_t *cursor, uint64_t diff, uint64_t top, V val) {
- uint64_t rank = getRank();
- assert(diff < rank);
- for (uint64_t d = diff; d < rank; d++) {
- uint64_t i = cursor[d];
+ ASSERT_VALID_DIM(diff);
+ const uint64_t rank = getRank();
+ for (uint64_t d = diff; d < rank; ++d) {
+ const uint64_t i = cursor[d];
appendIndex(d, top, i);
top = 0;
idx[d] = i;
/// Finds the lexicographic differing dimension.
uint64_t lexDiff(const uint64_t *cursor) const {
- for (uint64_t r = 0, rank = getRank(); r < rank; r++)
+ const uint64_t rank = getRank();
+ for (uint64_t r = 0; r < rank; ++r)
if (cursor[r] > idx[r])
return r;
else
std::vector<uint64_t> idx; // index cursor for lexicographic insertion.
};
+#undef ASSERT_COMPRESSED_DIM
+#undef ASSERT_VALID_DIM
+
+//===----------------------------------------------------------------------===//
/// A (higher-order) function object for enumerating the elements of some
/// `SparseTensorStorage` under a permutation. That is, the `forallElements`
/// method encapsulates the loop-nest for enumerating the elements of
assert(rank == getRank() && "Permutation rank mismatch");
const auto &rev = src.getRev(); // source-order -> semantic-order
const auto &dimSizes = src.getDimSizes(); // in source storage-order
- for (uint64_t s = 0; s < rank; s++) { // `s` source storage-order
+ for (uint64_t s = 0; s < rank; ++s) { // `s` source storage-order
uint64_t t = perm[rev[s]]; // `t` target-order
reord[s] = t;
permsz[t] = dimSizes[s];
std::vector<uint64_t> cursor; // in target order.
};
+//===----------------------------------------------------------------------===//
template <typename P, typename I, typename V>
class SparseTensorEnumerator final : public SparseTensorEnumeratorBase<V> {
using Base = SparseTensorEnumeratorBase<V>;
+ using StorageImpl = SparseTensorStorage<P, I, V>;
public:
/// Constructs an enumerator with the given permutation for mapping
/// the semantic-ordering of dimensions to the desired target-ordering.
///
/// Precondition: `perm` must be valid for `rank`.
- SparseTensorEnumerator(const SparseTensorStorage<P, I, V> &tensor,
- uint64_t rank, const uint64_t *perm)
+ SparseTensorEnumerator(const StorageImpl &tensor, uint64_t rank,
+ const uint64_t *perm)
: Base(tensor, rank, perm) {}
~SparseTensorEnumerator() final = default;
void forallElements(ElementConsumer<V> yield, uint64_t parentPos,
uint64_t d) {
// Recover the `<P,I,V>` type parameters of `src`.
- const auto &src =
- static_cast<const SparseTensorStorage<P, I, V> &>(this->src);
+ const auto &src = static_cast<const StorageImpl &>(this->src);
if (d == Base::getRank()) {
assert(parentPos < src.values.size() &&
"Value position is out of bounds");
const std::vector<I> &indicesD = src.indices[d];
assert(pstop <= indicesD.size() && "Index position is out of bounds");
uint64_t &cursorReordD = this->cursor[this->reord[d]];
- for (uint64_t pos = pstart; pos < pstop; pos++) {
+ for (uint64_t pos = pstart; pos < pstop; ++pos) {
cursorReordD = static_cast<uint64_t>(indicesD[pos]);
forallElements(yield, pos, d + 1);
}
} else if (src.isSingletonDim(d)) {
MLIR_SPARSETENSOR_FATAL("unsupported dimension level type");
} else { // Dense dimension.
- assert(src.isDenseDim(d));
+ assert(src.isDenseDim(d)); // TODO: reuse the ASSERT_DENSE_DIM message
const uint64_t sz = src.getDimSizes()[d];
const uint64_t pstart = parentPos * sz;
uint64_t &cursorReordD = this->cursor[this->reord[d]];
- for (uint64_t i = 0; i < sz; i++) {
+ for (uint64_t i = 0; i < sz; ++i) {
cursorReordD = i;
forallElements(yield, pstart + i, d + 1);
}
}
};
+//===----------------------------------------------------------------------===//
/// Statistics regarding the number of nonzero subtensors in
/// a source tensor, for direct sparse=>sparse conversion a la
/// <https://arxiv.org/abs/2001.02609>.
// Definitions of the ctors and factories of `SparseTensorStorage<P,I,V>`.
namespace detail {
-
-// TODO: try to unify this with `SparseTensorFile::assertMatchesShape`
-// which is used by `openSparseTensorCOO`. It's easy enough to resolve
-// the `std::vector` vs pointer mismatch for `dimSizes`; but it's trickier
-// to resolve the presence/absence of `perm` (without introducing extra
-// overhead), so perhaps the code duplication is unavoidable.
-//
/// Asserts that the `dimSizes` (in target-order) under the `perm` (mapping
/// semantic-order to target-order) are a refinement of the desired `shape`
/// (in semantic-order).
///
/// Precondition: `perm` and `shape` must be valid for `rank`.
-inline void assertPermutedSizesMatchShape(const std::vector<uint64_t> &dimSizes,
- uint64_t rank, const uint64_t *perm,
- const uint64_t *shape) {
- assert(perm && shape);
- assert(rank == dimSizes.size() && "Rank mismatch");
- for (uint64_t r = 0; r < rank; r++)
- assert((shape[r] == 0 || shape[r] == dimSizes[perm[r]]) &&
- "Dimension size mismatch");
-}
-
+void assertPermutedSizesMatchShape(const std::vector<uint64_t> &dimSizes,
+ uint64_t rank, const uint64_t *perm,
+ const uint64_t *shape);
} // namespace detail
template <typename P, typename I, typename V>
SparseTensorStorage<P, I, V> *SparseTensorStorage<P, I, V>::newSparseTensor(
uint64_t rank, const uint64_t *shape, const uint64_t *perm,
const DimLevelType *sparsity, SparseTensorCOO<V> *coo) {
- SparseTensorStorage<P, I, V> *n = nullptr;
if (coo) {
const auto &coosz = coo->getDimSizes();
+#ifndef NDEBUG
detail::assertPermutedSizesMatchShape(coosz, rank, perm, shape);
- n = new SparseTensorStorage<P, I, V>(coosz, perm, sparsity, coo);
- } else {
- std::vector<uint64_t> permsz(rank);
- for (uint64_t r = 0; r < rank; r++) {
- assert(shape[r] > 0 && "Dimension size zero has trivial storage");
- permsz[perm[r]] = shape[r];
- }
- // We pass the null `coo` to ensure we select the intended constructor.
- n = new SparseTensorStorage<P, I, V>(permsz, perm, sparsity, coo);
+#endif
+ return new SparseTensorStorage<P, I, V>(coosz, perm, sparsity, coo);
+ }
+ // else
+ std::vector<uint64_t> permsz(rank);
+ for (uint64_t r = 0; r < rank; ++r) {
+ assert(shape[r] > 0 && "Dimension size zero has trivial storage");
+ permsz[perm[r]] = shape[r];
}
- return n;
+ // We pass the null `coo` to ensure we select the intended constructor.
+ return new SparseTensorStorage<P, I, V>(permsz, perm, sparsity, coo);
}
template <typename P, typename I, typename V>
SparseTensorEnumeratorBase<V> *enumerator;
source->newEnumerator(&enumerator, rank, perm);
const auto &permsz = enumerator->permutedSizes();
+#ifndef NDEBUG
detail::assertPermutedSizesMatchShape(permsz, rank, perm, shape);
+#endif
auto *tensor =
new SparseTensorStorage<P, I, V>(permsz, perm, sparsity, *source);
delete enumerator;
// we should really use nnz and dense/sparse distribution.
bool allDense = true;
uint64_t sz = 1;
- for (uint64_t r = 0, rank = getRank(); r < rank; r++) {
+ for (uint64_t r = 0, rank = getRank(); r < rank; ++r) {
if (isCompressedDim(r)) {
// TODO: Take a parameter between 1 and `dimSizes[r]`, and multiply
// `sz` by that before reserving. (For now we just use 1.)
sz = 1;
allDense = false;
} else { // Dense dimension.
- assert(isDenseDim(r));
+ ASSERT_DENSE_DIM(r);
sz = detail::checkedMul(sz, getDimSizes()[r]);
}
}
nnz.initialize(*enumerator);
// Initialize "pointers" overhead (and allocate "indices", "values").
uint64_t parentSz = 1; // assembled-size (not dimension-size) of `r-1`.
- for (uint64_t rank = getRank(), r = 0; r < rank; r++) {
+ for (uint64_t rank = getRank(), r = 0; r < rank; ++r) {
if (isCompressedDim(r)) {
pointers[r].reserve(parentSz + 1);
pointers[r].push_back(0);
// The yieldPos loop
enumerator->forallElements([this](const std::vector<uint64_t> &ind, V val) {
uint64_t parentSz = 1, parentPos = 0;
- for (uint64_t rank = getRank(), r = 0; r < rank; r++) {
+ for (uint64_t rank = getRank(), r = 0; r < rank; ++r) {
if (isCompressedDim(r)) {
// If `parentPos == parentSz` then it's valid as an array-lookup;
// however, it's semantically invalid here since that entry
} else if (isSingletonDim(r)) {
// the new parentPos equals the old parentPos.
} else { // Dense dimension.
- assert(isDenseDim(r));
+ ASSERT_DENSE_DIM(r);
parentPos = parentPos * getDimSizes()[r] + ind[r];
}
parentSz = assembledSize(parentSz, r);
// No longer need the enumerator, so we'll delete it ASAP.
delete enumerator;
// The finalizeYieldPos loop
- for (uint64_t parentSz = 1, rank = getRank(), r = 0; r < rank; r++) {
+ for (uint64_t parentSz = 1, rank = getRank(), r = 0; r < rank; ++r) {
if (isCompressedDim(r)) {
assert(parentSz == pointers[r].size() - 1 &&
"Actual pointers size doesn't match the expected size");
assert(pointers[r][parentSz - 1] == pointers[r][parentSz] &&
"Pointers got corrupted");
// TODO: optimize this by using `memmove` or similar.
- for (uint64_t n = 0; n < parentSz; n++) {
+ for (uint64_t n = 0; n < parentSz; ++n) {
const uint64_t parentPos = parentSz - n;
pointers[r][parentPos] = pointers[r][parentPos - 1];
}
} // namespace sparse_tensor
} // namespace mlir
+#undef ASSERT_DENSE_DIM
+
#endif // MLIR_EXECUTIONENGINE_SPARSETENSOR_STORAGE_H
//===----------------------------------------------------------------------===//
#include "mlir/ExecutionEngine/SparseTensorUtils.h"
+
+#ifdef MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
+
#include "mlir/ExecutionEngine/SparseTensor/COO.h"
#include "mlir/ExecutionEngine/SparseTensor/ErrorHandling.h"
#include "mlir/ExecutionEngine/SparseTensor/File.h"
#include "mlir/ExecutionEngine/SparseTensor/Storage.h"
-#ifdef MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
-
-#include <algorithm>
-#include <cassert>
-#include <cctype>
-#include <cstdio>
-#include <cstdlib>
-#include <cstring>
-#include <fstream>
-#include <functional>
-#include <iostream>
-#include <limits>
#include <numeric>
using namespace mlir::sparse_tensor;
/// Initializes sparse tensor from an external COO-flavored format.
template <typename V>
static SparseTensorStorage<uint64_t, uint64_t, V> *
-toMLIRSparseTensor(uint64_t rank, uint64_t nse, uint64_t *shape, V *values,
- uint64_t *indices, uint64_t *perm, uint8_t *sparse) {
- const DimLevelType *sparsity = (DimLevelType *)(sparse);
+toMLIRSparseTensor(uint64_t rank, uint64_t nse, const uint64_t *shape,
+ const V *values, const uint64_t *indices,
+ const uint64_t *perm, const DimLevelType *sparsity) {
#ifndef NDEBUG
// Verify that perm is a permutation of 0..(rank-1).
std::vector<uint64_t> order(perm, perm + rank);
/// Converts a sparse tensor to an external COO-flavored format.
template <typename V>
-static void fromMLIRSparseTensor(void *tensor, uint64_t *pRank, uint64_t *pNse,
- uint64_t **pShape, V **pValues,
- uint64_t **pIndices) {
- assert(tensor);
- auto sparseTensor =
- static_cast<SparseTensorStorage<uint64_t, uint64_t, V> *>(tensor);
- uint64_t rank = sparseTensor->getRank();
+static void
+fromMLIRSparseTensor(const SparseTensorStorage<uint64_t, uint64_t, V> *tensor,
+ uint64_t *pRank, uint64_t *pNse, uint64_t **pShape,
+ V **pValues, uint64_t **pIndices) {
+ assert(tensor && "Received nullptr for tensor");
+ uint64_t rank = tensor->getRank();
std::vector<uint64_t> perm(rank);
std::iota(perm.begin(), perm.end(), 0);
- SparseTensorCOO<V> *coo = sparseTensor->toCOO(perm.data());
+ SparseTensorCOO<V> *coo = tensor->toCOO(perm.data());
const std::vector<Element<V>> &elements = coo->getElements();
uint64_t nse = elements.size();
#define IMPL_OUTSPARSETENSOR(VNAME, V) \
void outSparseTensor##VNAME(void *coo, void *dest, bool sort) { \
- return outSparseTensor<V>(coo, dest, sort); \
+ assert(coo && "Got nullptr for COO object"); \
+ auto &coo_ = *static_cast<SparseTensorCOO<V> *>(coo); \
+ if (sort) \
+ coo_.sort(); \
+ return writeExtFROSTT(coo_, static_cast<char *>(dest)); \
}
FOREVERY_V(IMPL_OUTSPARSETENSOR)
#undef IMPL_OUTSPARSETENSOR
out->assign(dimSizes, dimSizes + rank);
}
+// We can't use `static_cast` here because `DimLevelType` is an enum-class.
// TODO: generalize beyond 64-bit indices.
#define IMPL_CONVERTTOMLIRSPARSETENSOR(VNAME, V) \
void *convertToMLIRSparseTensor##VNAME( \
uint64_t rank, uint64_t nse, uint64_t *shape, V *values, \
uint64_t *indices, uint64_t *perm, uint8_t *sparse) { \
return toMLIRSparseTensor<V>(rank, nse, shape, values, indices, perm, \
- sparse); \
+ reinterpret_cast<DimLevelType *>(sparse)); \
}
FOREVERY_V(IMPL_CONVERTTOMLIRSPARSETENSOR)
#undef IMPL_CONVERTTOMLIRSPARSETENSOR
void convertFromMLIRSparseTensor##VNAME(void *tensor, uint64_t *pRank, \
uint64_t *pNse, uint64_t **pShape, \
V **pValues, uint64_t **pIndices) { \
- fromMLIRSparseTensor<V>(tensor, pRank, pNse, pShape, pValues, pIndices); \
+ fromMLIRSparseTensor<V>( \
+ static_cast<SparseTensorStorage<uint64_t, uint64_t, V> *>(tensor), \
+ pRank, pNse, pShape, pValues, pIndices); \
}
FOREVERY_V(IMPL_CONVERTFROMMLIRSPARSETENSOR)
#undef IMPL_CONVERTFROMMLIRSPARSETENSOR