[mlir] Fix warnings in release builds
author    Kazu Hirata <kazu@google.com>
          Wed, 14 Jun 2023 21:22:17 +0000 (14:22 -0700)
committer Kazu Hirata <kazu@google.com>
          Wed, 14 Jun 2023 21:22:17 +0000 (14:22 -0700)
This patch fixes two unused-variable warnings that fire only in release
builds, where assert() compiles away and leaves its operands unreferenced:

  mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp:846:16:
  error: unused variable 'lvlTp' [-Werror,-Wunused-variable]

  mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp:1059:13:
  error: unused variable '[t, l]' [-Werror,-Wunused-variable]
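
Both warnings come from variables that are used only inside assert().
As a rough standalone sketch of the two suppression patterns applied
below (the names 'value' and 'items' are illustrative stand-ins, not
taken from the MLIR sources):

  #include <cassert>
  #include <cstdio>
  #include <utility>
  #include <vector>

  int main() {
    // Pattern 1: a (void) cast counts as a use, silencing
    // -Wunused-variable when the assert above compiles away.
    int value = 42; // hypothetical stand-in for 'lvlTp'
    assert(value == 42);
    (void)value;

    // Pattern 2: guard a loop that exists only to host asserts with
    // #ifndef NDEBUG, so the whole check disappears in release builds
    // (mirrors the '[t, l]' structured binding in the patch).
    std::vector<std::pair<int, int>> items = {{0, 1}, {2, 3}};
  #ifndef NDEBUG
    for (auto [t, l] : items)
      assert(t < l);
  #endif
    std::printf("checked %zu items\n", items.size());
    return 0;
  }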

mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp

index 0a70cb0..e3abe10 100644
@@ -849,6 +849,7 @@ std::pair<Operation *, Value> LoopEmitter::emitWhileLoopOverTensorsAtLvls(
     // Must be a recognizable sparse level.
     assert(isCompressedDLT(lvlTp) || isCompressedWithHiDLT(lvlTp) ||
            isSingletonDLT(lvlTp));
+    (void)lvlTp;
 
     unsigned prevSz = ivs.size();
     const auto reassoc = getCollapseReassociation(tid, lvl);
@@ -1054,12 +1055,14 @@ Operation *LoopEmitter::enterCoIterationOverTensorsAtLvls(
     OpBuilder &builder, Location loc, ArrayRef<TensorLevel> tidLvls,
     MutableArrayRef<Value> reduc, bool tryParallel, bool genDedup,
     bool needsUniv) {
+#ifndef NDEBUG
   // Sanity checks.
   assert(!tidLvls.empty());
   for (auto [t, l] : unpackTensorLevelRange(tidLvls)) {
     assert(!coords[t][l] ||                 // We cannot re-enter the same level
            !dependentLvlMap[t][l].empty()); // unless it is a slice-driver loop
   }
+#endif
   // TODO: support multiple return on parallel for?
   tryParallel = tryParallel && reduc.size() <= 1;
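
The two hunks use different idioms, presumably because 'lvlTp' is a
single variable that a (void) cast can mark as used, while the second
check is an entire loop over a structured binding that exists only for
its asserts, so wrapping it in #ifndef NDEBUG drops the dead loop from
release builds altogether.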