[mlir][sparse] Renaming x-macros for better hygiene
author     wren romano <2998727+wrengr@users.noreply.github.com>
Fri, 30 Sep 2022 20:01:18 +0000 (13:01 -0700)
committer  wren romano <2998727+wrengr@users.noreply.github.com>
Fri, 30 Sep 2022 21:04:58 +0000 (14:04 -0700)
Now that mlir_sparsetensor_utils is a public library, this differential renames the x-macros to avoid polluting the macro namespace of client code.
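
For illustration, a minimal standalone sketch (not the real Enums.h; it copies only the 64/32/16-bit entries visible in the diff below) of how one of the renamed x-macros expands a callback over every fixed-width overhead type:

  #include <cstdint>
  #include <iostream>

  // Sketch of the renamed list macro: applies DO to each fixed-width type.
  #define MLIR_SPARSETENSOR_FOREVERY_FIXED_O(DO)                               \
    DO(64, uint64_t)                                                           \
    DO(32, uint32_t)                                                           \
    DO(16, uint16_t)

  // One callback instantiation per listed type.
  #define PRINT_WIDTH(ONAME, O)                                                \
    std::cout << "u" << ONAME << " is " << sizeof(O) << " bytes\n";

  int main() {
    MLIR_SPARSETENSOR_FOREVERY_FIXED_O(PRINT_WIDTH)
  }
  #undef PRINT_WIDTH

The real headers expand the same lists into declarations and definitions (see the DECL_GETVALUES / IMPL_GETPOINTERS blocks below), so the set of supported types is written exactly once; the long MLIR_SPARSETENSOR_ prefix is the price for exposing those lists from public headers.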

Reviewed By: aartbik, Peiming

Differential Revision: https://reviews.llvm.org/D134988

mlir/include/mlir/ExecutionEngine/SparseTensor/Enums.h
mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h
mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp
mlir/lib/ExecutionEngine/SparseTensorUtils.cpp

diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/Enums.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/Enums.h
index bd665ab..84ac9f9 100644
@@ -67,7 +67,7 @@ enum class MLIR_SPARSETENSOR_EXPORT OverheadType : uint32_t {
 // fixed-width.  It excludes `index_type` because that type is often
 // handled specially (e.g., by translating it into the architecture-dependent
 // equivalent fixed-width overhead type).
-#define FOREVERY_FIXED_O(DO)                                                   \
+#define MLIR_SPARSETENSOR_FOREVERY_FIXED_O(DO)                                 \
   DO(64, uint64_t)                                                             \
   DO(32, uint32_t)                                                             \
   DO(16, uint16_t)                                                             \
@@ -75,8 +75,8 @@ enum class MLIR_SPARSETENSOR_EXPORT OverheadType : uint32_t {
 
 // This x-macro calls its argument on every overhead type, including
 // `index_type`.
-#define FOREVERY_O(DO)                                                         \
-  FOREVERY_FIXED_O(DO)                                                         \
+#define MLIR_SPARSETENSOR_FOREVERY_O(DO)                                       \
+  MLIR_SPARSETENSOR_FOREVERY_FIXED_O(DO)                                       \
   DO(0, index_type)
 
 // These are not just shorthands but indicate the particular
@@ -100,7 +100,7 @@ enum class MLIR_SPARSETENSOR_EXPORT PrimaryType : uint32_t {
 };
 
 // This x-macro includes all `V` types.
-#define FOREVERY_V(DO)                                                         \
+#define MLIR_SPARSETENSOR_FOREVERY_V(DO)                                       \
   DO(F64, double)                                                              \
   DO(F32, float)                                                               \
   DO(F16, f16)                                                                 \
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
index cfe7805..2f3d1a8 100644
@@ -131,35 +131,35 @@ public:
 #define DECL_NEWENUMERATOR(VNAME, V)                                           \
   virtual void newEnumerator(SparseTensorEnumeratorBase<V> **, uint64_t,       \
                              const uint64_t *) const;
-  FOREVERY_V(DECL_NEWENUMERATOR)
+  MLIR_SPARSETENSOR_FOREVERY_V(DECL_NEWENUMERATOR)
 #undef DECL_NEWENUMERATOR
 
   /// Pointers-overhead storage.
 #define DECL_GETPOINTERS(PNAME, P)                                             \
   virtual void getPointers(std::vector<P> **, uint64_t);
-  FOREVERY_FIXED_O(DECL_GETPOINTERS)
+  MLIR_SPARSETENSOR_FOREVERY_FIXED_O(DECL_GETPOINTERS)
 #undef DECL_GETPOINTERS
 
   /// Indices-overhead storage.
 #define DECL_GETINDICES(INAME, I)                                              \
   virtual void getIndices(std::vector<I> **, uint64_t);
-  FOREVERY_FIXED_O(DECL_GETINDICES)
+  MLIR_SPARSETENSOR_FOREVERY_FIXED_O(DECL_GETINDICES)
 #undef DECL_GETINDICES
 
   /// Primary storage.
 #define DECL_GETVALUES(VNAME, V) virtual void getValues(std::vector<V> **);
-  FOREVERY_V(DECL_GETVALUES)
+  MLIR_SPARSETENSOR_FOREVERY_V(DECL_GETVALUES)
 #undef DECL_GETVALUES
 
   /// Element-wise insertion in lexicographic index order.
 #define DECL_LEXINSERT(VNAME, V) virtual void lexInsert(const uint64_t *, V);
-  FOREVERY_V(DECL_LEXINSERT)
+  MLIR_SPARSETENSOR_FOREVERY_V(DECL_LEXINSERT)
 #undef DECL_LEXINSERT
 
   /// Expanded insertion.
 #define DECL_EXPINSERT(VNAME, V)                                               \
   virtual void expInsert(uint64_t *, V *, bool *, uint64_t *, uint64_t);
-  FOREVERY_V(DECL_EXPINSERT)
+  MLIR_SPARSETENSOR_FOREVERY_V(DECL_EXPINSERT)
 #undef DECL_EXPINSERT
 
   /// Finishes insertion.
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h b/mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h
index f3797a1..d240c42 100644
@@ -65,7 +65,7 @@ _mlir_ciface_newSparseTensor(StridedMemRefType<DimLevelType, 1> *aref, // NOLINT
 #define DECL_SPARSEVALUES(VNAME, V)                                            \
   MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_sparseValues##VNAME(              \
       StridedMemRefType<V, 1> *out, void *tensor);
-FOREVERY_V(DECL_SPARSEVALUES)
+MLIR_SPARSETENSOR_FOREVERY_V(DECL_SPARSEVALUES)
 #undef DECL_SPARSEVALUES
 
 /// Tensor-storage method to obtain direct access to the pointers array
@@ -73,7 +73,7 @@ FOREVERY_V(DECL_SPARSEVALUES)
 #define DECL_SPARSEPOINTERS(PNAME, P)                                          \
   MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_sparsePointers##PNAME(            \
       StridedMemRefType<P, 1> *out, void *tensor, index_type d);
-FOREVERY_O(DECL_SPARSEPOINTERS)
+MLIR_SPARSETENSOR_FOREVERY_O(DECL_SPARSEPOINTERS)
 #undef DECL_SPARSEPOINTERS
 
 /// Tensor-storage method to obtain direct access to the indices array
@@ -81,7 +81,7 @@ FOREVERY_O(DECL_SPARSEPOINTERS)
 #define DECL_SPARSEINDICES(INAME, I)                                           \
   MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_sparseIndices##INAME(             \
       StridedMemRefType<I, 1> *out, void *tensor, index_type d);
-FOREVERY_O(DECL_SPARSEINDICES)
+MLIR_SPARSETENSOR_FOREVERY_O(DECL_SPARSEINDICES)
 #undef DECL_SPARSEINDICES
 
 /// Coordinate-scheme method for adding a new element.
@@ -90,7 +90,7 @@ FOREVERY_O(DECL_SPARSEINDICES)
       void *coo, StridedMemRefType<V, 0> *vref,                                \
       StridedMemRefType<index_type, 1> *iref,                                  \
       StridedMemRefType<index_type, 1> *pref);
-FOREVERY_V(DECL_ADDELT)
+MLIR_SPARSETENSOR_FOREVERY_V(DECL_ADDELT)
 #undef DECL_ADDELT
 
 /// Coordinate-scheme method for getting the next element while iterating.
@@ -98,7 +98,7 @@ FOREVERY_V(DECL_ADDELT)
   MLIR_CRUNNERUTILS_EXPORT bool _mlir_ciface_getNext##VNAME(                   \
       void *coo, StridedMemRefType<index_type, 1> *iref,                       \
       StridedMemRefType<V, 0> *vref);
-FOREVERY_V(DECL_GETNEXT)
+MLIR_SPARSETENSOR_FOREVERY_V(DECL_GETNEXT)
 #undef DECL_GETNEXT
 
 /// Tensor-storage method to insert elements in lexicographical index order.
@@ -106,7 +106,7 @@ FOREVERY_V(DECL_GETNEXT)
   MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_lexInsert##VNAME(                 \
       void *tensor, StridedMemRefType<index_type, 1> *cref,                    \
       StridedMemRefType<V, 0> *vref);
-FOREVERY_V(DECL_LEXINSERT)
+MLIR_SPARSETENSOR_FOREVERY_V(DECL_LEXINSERT)
 #undef DECL_LEXINSERT
 
 /// Tensor-storage method to insert using expansion.
@@ -115,7 +115,7 @@ FOREVERY_V(DECL_LEXINSERT)
       void *tensor, StridedMemRefType<index_type, 1> *cref,                    \
       StridedMemRefType<V, 1> *vref, StridedMemRefType<bool, 1> *fref,         \
       StridedMemRefType<index_type, 1> *aref, index_type count);
-FOREVERY_V(DECL_EXPINSERT)
+MLIR_SPARSETENSOR_FOREVERY_V(DECL_EXPINSERT)
 #undef DECL_EXPINSERT
 
 //===----------------------------------------------------------------------===//
@@ -138,7 +138,7 @@ MLIR_CRUNNERUTILS_EXPORT void endInsert(void *tensor);
 #define DECL_OUTSPARSETENSOR(VNAME, V)                                         \
   MLIR_CRUNNERUTILS_EXPORT void outSparseTensor##VNAME(void *coo, void *dest,  \
                                                        bool sort);
-FOREVERY_V(DECL_OUTSPARSETENSOR)
+MLIR_SPARSETENSOR_FOREVERY_V(DECL_OUTSPARSETENSOR)
 #undef DECL_OUTSPARSETENSOR
 
 /// Releases the memory for the tensor-storage object.
@@ -147,7 +147,7 @@ MLIR_CRUNNERUTILS_EXPORT void delSparseTensor(void *tensor);
 /// Releases the memory for the coordinate-scheme object.
 #define DECL_DELCOO(VNAME, V)                                                  \
   MLIR_CRUNNERUTILS_EXPORT void delSparseTensorCOO##VNAME(void *coo);
-FOREVERY_V(DECL_DELCOO)
+MLIR_SPARSETENSOR_FOREVERY_V(DECL_DELCOO)
 #undef DECL_DELCOO
 
 /// Helper function to read a sparse tensor filename from the environment,
@@ -183,7 +183,7 @@ MLIR_CRUNNERUTILS_EXPORT void readSparseTensorShape(char *filename,
   MLIR_CRUNNERUTILS_EXPORT void *convertToMLIRSparseTensor##VNAME(             \
       uint64_t rank, uint64_t nse, uint64_t *shape, V *values,                 \
       uint64_t *indices, uint64_t *perm, uint8_t *sparse);
-FOREVERY_V(DECL_CONVERTTOMLIRSPARSETENSOR)
+MLIR_SPARSETENSOR_FOREVERY_V(DECL_CONVERTTOMLIRSPARSETENSOR)
 #undef DECL_CONVERTTOMLIRSPARSETENSOR
 
 /// Converts a sparse tensor to COO-flavored format expressed using
@@ -202,7 +202,7 @@ FOREVERY_V(DECL_CONVERTTOMLIRSPARSETENSOR)
   MLIR_CRUNNERUTILS_EXPORT void convertFromMLIRSparseTensor##VNAME(            \
       void *tensor, uint64_t *pRank, uint64_t *pNse, uint64_t **pShape,        \
       V **pValues, uint64_t **pIndices);
-FOREVERY_V(DECL_CONVERTFROMMLIRSPARSETENSOR)
+MLIR_SPARSETENSOR_FOREVERY_V(DECL_CONVERTFROMMLIRSPARSETENSOR)
 #undef DECL_CONVERTFROMMLIRSPARSETENSOR
 
 } // extern "C"
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
index 62c7399..e2d8e62 100644
@@ -79,9 +79,9 @@ Type mlir::sparse_tensor::getIndexOverheadType(
 }
 
 // TODO: Adjust the naming convention for the constructors of
-// `OverheadType` so we can use the `FOREVERY_O` x-macro here instead
-// of `FOREVERY_FIXED_O`; to further reduce the possibility of typo bugs
-// or things getting out of sync.
+// `OverheadType` so we can use the `MLIR_SPARSETENSOR_FOREVERY_O` x-macro
+// here instead of `MLIR_SPARSETENSOR_FOREVERY_FIXED_O`; to further reduce
+// the possibility of typo bugs or things getting out of sync.
 StringRef mlir::sparse_tensor::overheadTypeFunctionSuffix(OverheadType ot) {
   switch (ot) {
   case OverheadType::kIndex:
@@ -89,7 +89,7 @@ StringRef mlir::sparse_tensor::overheadTypeFunctionSuffix(OverheadType ot) {
 #define CASE(ONAME, O)                                                         \
   case OverheadType::kU##ONAME:                                                \
     return #ONAME;
-    FOREVERY_FIXED_O(CASE)
+    MLIR_SPARSETENSOR_FOREVERY_FIXED_O(CASE)
 #undef CASE
   }
   llvm_unreachable("Unknown OverheadType");
@@ -131,7 +131,7 @@ StringRef mlir::sparse_tensor::primaryTypeFunctionSuffix(PrimaryType pt) {
 #define CASE(VNAME, V)                                                         \
   case PrimaryType::k##VNAME:                                                  \
     return #VNAME;
-    FOREVERY_V(CASE)
+    MLIR_SPARSETENSOR_FOREVERY_V(CASE)
 #undef CASE
   }
   llvm_unreachable("Unknown PrimaryType");
diff --git a/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp b/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp
index 7c4f3ea..888553c 100644
@@ -59,35 +59,35 @@ SparseTensorStorageBase::SparseTensorStorageBase(
       SparseTensorEnumeratorBase<V> **, uint64_t, const uint64_t *) const {    \
     FATAL_PIV("newEnumerator" #VNAME);                                         \
   }
-FOREVERY_V(IMPL_NEWENUMERATOR)
+MLIR_SPARSETENSOR_FOREVERY_V(IMPL_NEWENUMERATOR)
 #undef IMPL_NEWENUMERATOR
 
 #define IMPL_GETPOINTERS(PNAME, P)                                             \
   void SparseTensorStorageBase::getPointers(std::vector<P> **, uint64_t) {     \
     FATAL_PIV("getPointers" #PNAME);                                           \
   }
-FOREVERY_FIXED_O(IMPL_GETPOINTERS)
+MLIR_SPARSETENSOR_FOREVERY_FIXED_O(IMPL_GETPOINTERS)
 #undef IMPL_GETPOINTERS
 
 #define IMPL_GETINDICES(INAME, I)                                              \
   void SparseTensorStorageBase::getIndices(std::vector<I> **, uint64_t) {      \
     FATAL_PIV("getIndices" #INAME);                                            \
   }
-FOREVERY_FIXED_O(IMPL_GETINDICES)
+MLIR_SPARSETENSOR_FOREVERY_FIXED_O(IMPL_GETINDICES)
 #undef IMPL_GETINDICES
 
 #define IMPL_GETVALUES(VNAME, V)                                               \
   void SparseTensorStorageBase::getValues(std::vector<V> **) {                 \
     FATAL_PIV("getValues" #VNAME);                                             \
   }
-FOREVERY_V(IMPL_GETVALUES)
+MLIR_SPARSETENSOR_FOREVERY_V(IMPL_GETVALUES)
 #undef IMPL_GETVALUES
 
 #define IMPL_LEXINSERT(VNAME, V)                                               \
   void SparseTensorStorageBase::lexInsert(const uint64_t *, V) {               \
     FATAL_PIV("lexInsert" #VNAME);                                             \
   }
-FOREVERY_V(IMPL_LEXINSERT)
+MLIR_SPARSETENSOR_FOREVERY_V(IMPL_LEXINSERT)
 #undef IMPL_LEXINSERT
 
 #define IMPL_EXPINSERT(VNAME, V)                                               \
@@ -95,7 +95,7 @@ FOREVERY_V(IMPL_LEXINSERT)
                                           uint64_t) {                          \
     FATAL_PIV("expInsert" #VNAME);                                             \
   }
-FOREVERY_V(IMPL_EXPINSERT)
+MLIR_SPARSETENSOR_FOREVERY_V(IMPL_EXPINSERT)
 #undef IMPL_EXPINSERT
 
 #undef FATAL_PIV
diff --git a/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp b/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
index 992c85a..3eca2ba 100644
@@ -351,7 +351,7 @@ _mlir_ciface_newSparseTensor(StridedMemRefType<DimLevelType, 1> *aref, // NOLINT
     ref->sizes[0] = v->size();                                                 \
     ref->strides[0] = 1;                                                       \
   }
-FOREVERY_V(IMPL_SPARSEVALUES)
+MLIR_SPARSETENSOR_FOREVERY_V(IMPL_SPARSEVALUES)
 #undef IMPL_SPARSEVALUES
 
 #define IMPL_GETOVERHEAD(NAME, TYPE, LIB)                                      \
@@ -367,12 +367,12 @@ FOREVERY_V(IMPL_SPARSEVALUES)
   }
 #define IMPL_SPARSEPOINTERS(PNAME, P)                                          \
   IMPL_GETOVERHEAD(sparsePointers##PNAME, P, getPointers)
-FOREVERY_O(IMPL_SPARSEPOINTERS)
+MLIR_SPARSETENSOR_FOREVERY_O(IMPL_SPARSEPOINTERS)
 #undef IMPL_SPARSEPOINTERS
 
 #define IMPL_SPARSEINDICES(INAME, I)                                           \
   IMPL_GETOVERHEAD(sparseIndices##INAME, I, getIndices)
-FOREVERY_O(IMPL_SPARSEINDICES)
+MLIR_SPARSETENSOR_FOREVERY_O(IMPL_SPARSEINDICES)
 #undef IMPL_SPARSEINDICES
 #undef IMPL_GETOVERHEAD
 
@@ -393,7 +393,7 @@ FOREVERY_O(IMPL_SPARSEINDICES)
     static_cast<SparseTensorCOO<V> *>(coo)->add(indices, *value);              \
     return coo;                                                                \
   }
-FOREVERY_V(IMPL_ADDELT)
+MLIR_SPARSETENSOR_FOREVERY_V(IMPL_ADDELT)
 #undef IMPL_ADDELT
 
 #define IMPL_GETNEXT(VNAME, V)                                                 \
@@ -414,7 +414,7 @@ FOREVERY_V(IMPL_ADDELT)
     *value = elem->value;                                                      \
     return true;                                                               \
   }
-FOREVERY_V(IMPL_GETNEXT)
+MLIR_SPARSETENSOR_FOREVERY_V(IMPL_GETNEXT)
 #undef IMPL_GETNEXT
 
 #define IMPL_LEXINSERT(VNAME, V)                                               \
@@ -428,7 +428,7 @@ FOREVERY_V(IMPL_GETNEXT)
     V *value = vref->data + vref->offset;                                      \
     static_cast<SparseTensorStorageBase *>(tensor)->lexInsert(cursor, *value); \
   }
-FOREVERY_V(IMPL_LEXINSERT)
+MLIR_SPARSETENSOR_FOREVERY_V(IMPL_LEXINSERT)
 #undef IMPL_LEXINSERT
 
 #define IMPL_EXPINSERT(VNAME, V)                                               \
@@ -449,7 +449,7 @@ FOREVERY_V(IMPL_LEXINSERT)
     static_cast<SparseTensorStorageBase *>(tensor)->expInsert(                 \
         cursor, values, filled, added, count);                                 \
   }
-FOREVERY_V(IMPL_EXPINSERT)
+MLIR_SPARSETENSOR_FOREVERY_V(IMPL_EXPINSERT)
 #undef IMPL_EXPINSERT
 
 //===----------------------------------------------------------------------===//
@@ -475,7 +475,7 @@ void endInsert(void *tensor) {
       coo_.sort();                                                             \
     return writeExtFROSTT(coo_, static_cast<char *>(dest));                    \
   }
-FOREVERY_V(IMPL_OUTSPARSETENSOR)
+MLIR_SPARSETENSOR_FOREVERY_V(IMPL_OUTSPARSETENSOR)
 #undef IMPL_OUTSPARSETENSOR
 
 void delSparseTensor(void *tensor) {
@@ -486,7 +486,7 @@ void delSparseTensor(void *tensor) {
   void delSparseTensorCOO##VNAME(void *coo) {                                  \
     delete static_cast<SparseTensorCOO<V> *>(coo);                             \
   }
-FOREVERY_V(IMPL_DELCOO)
+MLIR_SPARSETENSOR_FOREVERY_V(IMPL_DELCOO)
 #undef IMPL_DELCOO
 
 char *getTensorFilename(index_type id) {
@@ -518,7 +518,7 @@ void readSparseTensorShape(char *filename, std::vector<uint64_t> *out) {
     return toMLIRSparseTensor<V>(rank, nse, shape, values, indices, perm,      \
                                  reinterpret_cast<DimLevelType *>(sparse));    \
   }
-FOREVERY_V(IMPL_CONVERTTOMLIRSPARSETENSOR)
+MLIR_SPARSETENSOR_FOREVERY_V(IMPL_CONVERTTOMLIRSPARSETENSOR)
 #undef IMPL_CONVERTTOMLIRSPARSETENSOR
 
 #define IMPL_CONVERTFROMMLIRSPARSETENSOR(VNAME, V)                             \
@@ -529,7 +529,7 @@ FOREVERY_V(IMPL_CONVERTTOMLIRSPARSETENSOR)
         static_cast<SparseTensorStorage<uint64_t, uint64_t, V> *>(tensor),     \
         pRank, pNse, pShape, pValues, pIndices);                               \
   }
-FOREVERY_V(IMPL_CONVERTFROMMLIRSPARSETENSOR)
+MLIR_SPARSETENSOR_FOREVERY_V(IMPL_CONVERTFROMMLIRSPARSETENSOR)
 #undef IMPL_CONVERTFROMMLIRSPARSETENSOR
 
 } // extern "C"