Revert "[mlir][sparse] Expose SparseTensor passes as enums instead of opaque numbers...
author     Nick Kreeger <nick.kreeger@gmail.com>
           Sun, 24 Apr 2022 01:14:48 +0000 (20:14 -0500)
committer  Nick Kreeger <nick.kreeger@gmail.com>
           Sun, 24 Apr 2022 01:14:48 +0000 (20:14 -0500)
This reverts commit d59cf901cbae7991f7847eb038d825efff1221ad.

Build fails on NVIDIA Sparse tests:
https://lab.llvm.org/buildbot/#/builders/61/builds/25447
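
In practical terms, the revert restores the integer-valued strategy flags: the sparsification
and sparse-compiler options take numeric values again (0 = none) instead of named enum values.
As a rough sketch only, assuming the MLIR Python bindings and the registered sparse-compiler
pipeline (mirroring the restored mlir/benchmark/python/common.py below):

from mlir import ir
from mlir.passmanager import PassManager

def run_sparse_compiler(module: ir.Module):
    # Illustrative only: 0 selects the kNone strategy for both options; the
    # mapping is defined by the switch statements restored in
    # SparseTensorPasses.cpp further down in this diff.
    opt = ("parallelization-strategy=0"
           " vectorization-strategy=0 vl=1 enable-simd-index32=False")
    PassManager.parse(f"sparse-compiler{{{opt}}}").run(module)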

27 files changed:
mlir/benchmark/python/common.py
mlir/include/mlir/Dialect/SparseTensor/Pipelines/Passes.h
mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
mlir/test/Dialect/SparseTensor/sparse_parallel.mlir
mlir/test/Dialect/SparseTensor/sparse_vector.mlir
mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir
mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir
mlir/test/Dialect/SparseTensor/sparse_vector_peeled.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py

mlir/benchmark/python/common.py
index 31875f16bfdc361504664045992bb3cc5ed8a28b..da0ef20a18296222e7bc6704c42cf564625111df 100644 (file)
@@ -16,7 +16,7 @@ def setup_passes(mlir_module):
     """Setup pass pipeline parameters for benchmark functions.
     """
     opt = (
-        "parallelization-strategy=none"
+        "parallelization-strategy=0"
         " vectorization-strategy=0 vl=1 enable-simd-index32=False"
     )
     pipeline = f"sparse-compiler{{{opt}}}"
mlir/include/mlir/Dialect/SparseTensor/Pipelines/Passes.h
index 0c896872dd3364f908d6c29c8044738fa3468969..a7064a2508312f946259bd377b7b5c87787f4994 100644 (file)
@@ -30,14 +30,12 @@ namespace sparse_tensor {
 struct SparseCompilerOptions
     : public PassPipelineOptions<SparseCompilerOptions> {
   // These options must be kept in sync with `SparsificationBase`.
-
-  PassOptions::Option<enum SparseParallelizationStrategy> parallelization{
+  PassOptions::Option<int32_t> parallelization{
       *this, "parallelization-strategy",
-      desc("Set the parallelization strategy"),
-      init(SparseParallelizationStrategy::kNone)};
-  PassOptions::Option<enum SparseVectorizationStrategy> vectorization{
+      desc("Set the parallelization strategy"), init(0)};
+  PassOptions::Option<int32_t> vectorization{
       *this, "vectorization-strategy", desc("Set the vectorization strategy"),
-      init(SparseVectorizationStrategy::kNone)};
+      init(0)};
   PassOptions::Option<int32_t> vectorLength{
       *this, "vl", desc("Set the vector length"), init(1)};
   PassOptions::Option<bool> enableSIMDIndex32{
@@ -49,8 +47,10 @@ struct SparseCompilerOptions
 
   /// Projects out the options for `createSparsificationPass`.
   SparsificationOptions sparsificationOptions() const {
-    return SparsificationOptions(parallelization, vectorization, vectorLength,
-                                 enableSIMDIndex32, enableVLAVectorization);
+    return SparsificationOptions(sparseParallelizationStrategy(parallelization),
+                                 sparseVectorizationStrategy(vectorization),
+                                 vectorLength, enableSIMDIndex32,
+                                 enableVLAVectorization);
   }
 
   // These options must be kept in sync with `SparseTensorConversionBase`.
mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
index 92afe5ea8c1a0d8d457faae729db8f7a4a0ec471..20a322e97dfc0304d6d38d1d4c26bff315961ee6 100644 (file)
@@ -45,6 +45,9 @@ enum class SparseParallelizationStrategy {
   // TODO: support reduction parallelization too?
 };
 
+/// Converts command-line parallelization flag to the strategy enum.
+SparseParallelizationStrategy sparseParallelizationStrategy(int32_t flag);
+
 /// Defines a vectorization strategy. Any inner loop is a candidate (full SIMD
 /// for parallel loops and horizontal SIMD for reduction loops). A loop is
 /// actually vectorized if (1) allowed by the strategy, and (2) the emitted
@@ -55,6 +58,9 @@ enum class SparseVectorizationStrategy {
   kAnyStorageInnerLoop
 };
 
+/// Converts command-line vectorization flag to the strategy enum.
+SparseVectorizationStrategy sparseVectorizationStrategy(int32_t flag);
+
 /// Options for the Sparsification pass.
 struct SparsificationOptions {
   SparsificationOptions(SparseParallelizationStrategy p,
mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
index e7c16def6dafc3c28a5e47c9080f10e1e72fc162..6e36259de94903a8074c611ed1abd16fed23673a 100644 (file)
@@ -63,34 +63,10 @@ def Sparsification : Pass<"sparsification", "ModuleOp"> {
     "vector::VectorDialect",
   ];
   let options = [
-    Option<"parallelization", "parallelization-strategy", "enum SparseParallelizationStrategy",
-           "mlir::SparseParallelizationStrategy::kNone",
-           "Set the parallelization strategy", [{llvm::cl::values(
-             clEnumValN(mlir::SparseParallelizationStrategy::kNone, "none",
-                        "Turn off sparse parallelization."),
-             clEnumValN(mlir::SparseParallelizationStrategy::kDenseOuterLoop,
-                        "dense-outer-loop",
-                        "Enable dense outer loop sparse parallelization."),
-             clEnumValN(mlir::SparseParallelizationStrategy::kAnyStorageOuterLoop,
-                        "any-storage-outer-loop",
-                        "Enable sparse parallelization regardless of storage for the outer loop."),
-             clEnumValN(mlir::SparseParallelizationStrategy::kDenseAnyLoop,
-                        "dense-any-loop",
-                        "Enable dense parallelization for any loop."),
-             clEnumValN(mlir::SparseParallelizationStrategy::kAnyStorageAnyLoop,
-                        "any-storage-any-loop",
-                        "Enable sparse parallelization for any storage and loop."))}]>,
-    Option<"vectorization", "vectorization-strategy", "enum SparseVectorizationStrategy",
-           "mlir::SparseVectorizationStrategy::kNone",
-           "Set the vectorization strategy", [{llvm::cl::values(
-             clEnumValN(mlir::SparseVectorizationStrategy::kNone, "none",
-                        "Turn off sparse vectorization."),
-             clEnumValN(mlir::SparseVectorizationStrategy::kDenseInnerLoop,
-                        "dense-inner-loop",
-                        "Enable vectorization for dense inner loops."),
-             clEnumValN(mlir::SparseVectorizationStrategy::kAnyStorageInnerLoop,
-                        "any-storage-inner-loop",
-                        "Enable sparse vectorization for inner loops with any storage."))}]>,
+    Option<"parallelization", "parallelization-strategy", "int32_t", "0",
+           "Set the parallelization strategy">,
+    Option<"vectorization", "vectorization-strategy", "int32_t", "0",
+           "Set the vectorization strategy">,
     Option<"vectorLength", "vl", "int32_t", "1",
            "Set the vector length">,
     Option<"enableSIMDIndex32", "enable-simd-index32", "bool", "false",
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
index 4a9e9ae1f6675bc9ad3a42d064a7a8e4eb4e3f47..2b957e6fbad60849af6b8e55bc0360f4641b339e 100644 (file)
@@ -39,8 +39,8 @@ struct SparsificationPass : public SparsificationBase<SparsificationPass> {
   SparsificationPass() = default;
   SparsificationPass(const SparsificationPass &pass) = default;
   SparsificationPass(const SparsificationOptions &options) {
-    parallelization = options.parallelizationStrategy;
-    vectorization = options.vectorizationStrategy;
+    parallelization = static_cast<int32_t>(options.parallelizationStrategy);
+    vectorization = static_cast<int32_t>(options.vectorizationStrategy);
     vectorLength = options.vectorLength;
     enableSIMDIndex32 = options.enableSIMDIndex32;
     enableVLAVectorization = options.enableVLAVectorization;
@@ -50,8 +50,10 @@ struct SparsificationPass : public SparsificationBase<SparsificationPass> {
     auto *ctx = &getContext();
     RewritePatternSet patterns(ctx);
     // Translate strategy flags to strategy options.
-    SparsificationOptions options(parallelization, vectorization, vectorLength,
-                                  enableSIMDIndex32, enableVLAVectorization);
+    SparsificationOptions options(
+        sparseParallelizationStrategy(parallelization),
+        sparseVectorizationStrategy(vectorization), vectorLength,
+        enableSIMDIndex32, enableVLAVectorization);
     // Apply rewriting.
     populateSparsificationPatterns(patterns, options);
     vector::populateVectorToVectorCanonicalizationPatterns(patterns);
@@ -131,6 +133,33 @@ struct SparseTensorConversionPass
 
 } // namespace
 
+SparseParallelizationStrategy
+mlir::sparseParallelizationStrategy(int32_t flag) {
+  switch (flag) {
+  default:
+    return SparseParallelizationStrategy::kNone;
+  case 1:
+    return SparseParallelizationStrategy::kDenseOuterLoop;
+  case 2:
+    return SparseParallelizationStrategy::kAnyStorageOuterLoop;
+  case 3:
+    return SparseParallelizationStrategy::kDenseAnyLoop;
+  case 4:
+    return SparseParallelizationStrategy::kAnyStorageAnyLoop;
+  }
+}
+
+SparseVectorizationStrategy mlir::sparseVectorizationStrategy(int32_t flag) {
+  switch (flag) {
+  default:
+    return SparseVectorizationStrategy::kNone;
+  case 1:
+    return SparseVectorizationStrategy::kDenseInnerLoop;
+  case 2:
+    return SparseVectorizationStrategy::kAnyStorageInnerLoop;
+  }
+}
+
 SparseToSparseConversionStrategy
 mlir::sparseToSparseConversionStrategy(int32_t flag) {
   switch (flag) {
mlir/test/Dialect/SparseTensor/sparse_parallel.mlir
index 5e0268191fbf6c31b701408373d0c73e0deac64a..9af037c7829a8e681fa54e9db5bffac803b35860 100644 (file)
@@ -1,12 +1,12 @@
-// RUN: mlir-opt %s -sparsification="parallelization-strategy=none" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=0" | \
 // RUN:   FileCheck %s --check-prefix=CHECK-PAR0
-// RUN: mlir-opt %s -sparsification="parallelization-strategy=dense-outer-loop" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=1" | \
 // RUN:   FileCheck %s --check-prefix=CHECK-PAR1
-// RUN: mlir-opt %s -sparsification="parallelization-strategy=any-storage-outer-loop" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=2" | \
 // RUN:   FileCheck %s --check-prefix=CHECK-PAR2
-// RUN: mlir-opt %s -sparsification="parallelization-strategy=dense-any-loop" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=3" | \
 // RUN:   FileCheck %s --check-prefix=CHECK-PAR3
-// RUN: mlir-opt %s -sparsification="parallelization-strategy=any-storage-any-loop" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=4" | \
 // RUN:   FileCheck %s --check-prefix=CHECK-PAR4
 
 #DenseMatrix = #sparse_tensor.encoding<{
mlir/test/Dialect/SparseTensor/sparse_vector.mlir
index be0aee2d46a6ccce67212ed17849605cc6b39d02..1425a7b896213924379df44ea9cef9d935f9acc4 100644 (file)
@@ -1,12 +1,12 @@
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=none vl=16" -cse -split-input-file | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=0 vl=16" -cse -split-input-file | \
 // RUN:   FileCheck %s --check-prefix=CHECK-VEC0
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=dense-inner-loop vl=16" -cse -split-input-file | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=1 vl=16" -cse -split-input-file | \
 // RUN:   FileCheck %s --check-prefix=CHECK-VEC1
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=16" -cse -split-input-file | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=16" -cse -split-input-file | \
 // RUN:   FileCheck %s --check-prefix=CHECK-VEC2
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=16 enable-simd-index32=true" -cse -split-input-file | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=16 enable-simd-index32=true" -cse -split-input-file | \
 // RUN:   FileCheck %s --check-prefix=CHECK-VEC3
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=4 enable-vla-vectorization=true" -cse -split-input-file | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=4 enable-vla-vectorization=true" -cse -split-input-file | \
 // RUN:   FileCheck %s --check-prefix=CHECK-VEC4
 
 #DenseVector = #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>
mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir
index e27c2d826fc75da051749321559b07687564754e..df55b8373e0eef6fa4bf16f36444ea0fddce357f 100644 (file)
@@ -1,6 +1,6 @@
 // NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
 
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=8" -canonicalize | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=8" -canonicalize | \
 // RUN:   FileCheck %s
 
 #SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["dense","compressed"]}>
mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir
index 5665f18f9680c476920c66c00915185d846326ba..792e74193149908a7d2ffc7deea6bb8424b13c81 100644 (file)
@@ -5,7 +5,7 @@
 // about what constitutes a good test! The CHECK should be
 // minimized and named to reflect the test intent.
 
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=8" -canonicalize | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=8" -canonicalize | \
 // RUN:   FileCheck %s
 
 #SparseVector = #sparse_tensor.encoding<{
mlir/test/Dialect/SparseTensor/sparse_vector_peeled.mlir
index 276b8a9b8b933995736c85650622a7a7de991e5d..65f1f7216bed83bc5dc0b7701a8615f97af8885c 100644 (file)
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=16" -scf-for-loop-peeling -canonicalize | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=16" -scf-for-loop-peeling -canonicalize | \
 // RUN:   FileCheck %s
 
 #SparseVector = #sparse_tensor.encoding<{
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir
index d7d192dc5be0bb5d419f48f3600fc6c9eda6a82f..577bca6a23f1ff28313a1b61d81086ec36ee1d81 100644 (file)
@@ -6,7 +6,7 @@
 //
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=2" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=2" | \
 // RUN: mlir-cpu-runner \
 // RUN:  -e entry -entry-point-result=void  \
 // RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
index bb2e3a2369b2d6e504bab3ad2d2a5971b954bbcd..6f86ac56a0596e146952832d56eb6b1f839e6bb8 100644 (file)
@@ -5,7 +5,7 @@
 //
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=2" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=2" | \
 // RUN: mlir-cpu-runner -e entry -entry-point-result=void \
 // RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
index 78dde398b16adcdcf547eb63f768d469a0d31b31..d7467b3d59af413515843872b3af7fc5b4d878cf 100644 (file)
@@ -7,7 +7,7 @@
 //
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=4" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=4" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test.tns" \
 // RUN: mlir-cpu-runner \
 // RUN:  -e entry -entry-point-result=void  \
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir
index 74e936371732d339256819ebc8a92a067371f4c5..b0da613a17ce59050971b9a737dea6b739c4803a 100644 (file)
@@ -5,7 +5,7 @@
 //
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=4" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=4" | \
 // RUN: mlir-cpu-runner -e entry -entry-point-result=void \
 // RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
index 007e34fb3aca241218bee7c5c23ebc38067c5092..517f811d0748a2606337cc70ecc337b70132ded5 100644 (file)
@@ -8,7 +8,7 @@
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
 // RUN: mlir-opt %s \
-// RUN:   --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=16 enable-simd-index32" | \
+// RUN:   --sparse-compiler="vectorization-strategy=2 vl=16 enable-simd-index32" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/wide.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN:  -e entry -entry-point-result=void  \
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
index 8042ab744746fbb68c479e876456ebd0b0b0bd71..1400d376ae274f0e213f1462b071d65ca9317a97 100644 (file)
@@ -7,7 +7,7 @@
 //
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=4" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=4" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/mttkrp_b.tns" \
 // RUN: mlir-cpu-runner \
 // RUN:  -e entry -entry-point-result=void  \
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
index 3dcdc018f14319b199a211df6c793890520b1bc1..95bc06de123de187980db36b33b139a78aee570c 100644 (file)
@@ -7,7 +7,7 @@
 //
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=4" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=4" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN:  -e entry -entry-point-result=void  \
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
index aec01cb99f1f7e9b8de25d8f1f378dd17284791c..33daf749247b03ee7b23ef47f30dfc20d093758f 100644 (file)
@@ -5,7 +5,7 @@
 //
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=2" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=2" | \
 // RUN: mlir-cpu-runner -e entry -entry-point-result=void \
 // RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
index 1bcc1ba35a6aa5eeb8a0aa0cfb005653d7f1ee11..83406e695297760fe69474f755bbe9ca9bcacce6 100644 (file)
@@ -5,7 +5,7 @@
 //
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=8" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=8" | \
 // RUN: mlir-cpu-runner -e entry -entry-point-result=void \
 // RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
index 7efc4537dcdbea8470db27fb9e0bfb0787802e9a..de9c9f1a82101450401d08f1cc99c73d194e9abf 100644 (file)
@@ -8,7 +8,7 @@
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
 // RUN: mlir-opt %s \
-// RUN:   --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=4 enable-simd-index32" | \
+// RUN:   --sparse-compiler="vectorization-strategy=2 vl=4 enable-simd-index32" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN:  -e entry -entry-point-result=void  \
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
index db205aff97a3c1d66c932a9dc8055893eb07c940..aadb8b3e88e1155babd7b9ae3634a9251cbd59b4 100755 (executable)
@@ -5,7 +5,7 @@
 //
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=8" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=8" | \
 // RUN: mlir-cpu-runner -e entry -entry-point-result=void \
 // RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
index 351b67859f6738891b16dac8ef61d8ac7434a6b5..09a17e10ba5a4df460f7278279a536c22cb21c45 100644 (file)
@@ -6,7 +6,7 @@
 //
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=4" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=4" | \
 // RUN: mlir-cpu-runner \
 // RUN:  -e entry -entry-point-result=void  \
 // RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
index 674cb562ef21c8f3169e4c9decc150841d43b007..6ced1f75c13f07c067c0ec0b3805a6ef7fbefcb3 100644 (file)
@@ -7,7 +7,7 @@
 //
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=2" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=2" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/wide.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN:  -e entry -entry-point-result=void  \
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
index 5d37f591ce894ca95a22fc4c23b762a14d7d7840..d14d9567d971fbaf26beff3d8986c4e1273a5a4a 100644 (file)
@@ -7,7 +7,7 @@
 //
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=2" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=2" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test_symmetric.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN:  -e entry -entry-point-result=void  \
mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
index a6abb3cc4605b1ab63ad3bad9d1b5a8412c23f7e..4ed741ed3727e54dcf5b0be0283eaff5e480f0ee 100644 (file)
@@ -140,24 +140,22 @@ def main():
             ir.AffineMap.get_permutation([0, 1]),
             ir.AffineMap.get_permutation([1, 0])
         ]
-        vec_strategy = [
-          'none', 'dense-inner-loop'
-        ]
         for level in levels:
             for ordering in orderings:
                 for pwidth in [32]:
                     for iwidth in [32]:
-                        for vec in vec_strategy:
-                            for e in [True]:
-                                vl = 1 if vec == 0 else 16
-                                attr = st.EncodingAttr.get(level, ordering, pwidth, iwidth)
-                                opt = (f'parallelization-strategy=none '
-                                       f'vectorization-strategy={vec} '
-                                       f'vl={vl} enable-simd-index32={e}')
-                                compiler = sparse_compiler.SparseCompiler(
-                                    options=opt, opt_level=0, shared_libs=[support_lib])
-                                build_compile_and_run_SDDMMM(attr, compiler)
-                                count = count + 1
+                        for par in [0]:
+                            for vec in [0, 1]:
+                                for e in [True]:
+                                    vl = 1 if vec == 0 else 16
+                                    attr = st.EncodingAttr.get(level, ordering, pwidth, iwidth)
+                                    opt = (f'parallelization-strategy={par} '
+                                           f'vectorization-strategy={vec} '
+                                           f'vl={vl} enable-simd-index32={e}')
+                                    compiler = sparse_compiler.SparseCompiler(
+                                        options=opt, opt_level=0, shared_libs=[support_lib])
+                                    build_compile_and_run_SDDMMM(attr, compiler)
+                                    count = count + 1
     # CHECK: Passed 16 tests
     print('Passed ', count, 'tests')
 
mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
index 57ca3d6e1a94c157eaf2307f6615c9fd1280e3ec..9712620a4a7a9d0d6ef0da36c29db4d7f2503cb9 100644 (file)
@@ -120,10 +120,12 @@ def main():
         # a *single* sparse tensor. Note that we deliberate do not exhaustively
         # search the full state space to reduce runtime of the test. It is
         # straightforward to adapt the code below to explore more combinations.
+        par = 0
+        vec = 0
         vl = 1
         e = False
-        opt = (f'parallelization-strategy=none '
-               f'vectorization-strategy=none '
+        opt = (f'parallelization-strategy={par} '
+               f'vectorization-strategy={vec} '
                f'vl={vl} enable-simd-index32={e}')
         levels = [[st.DimLevelType.dense, st.DimLevelType.dense],
                   [st.DimLevelType.dense, st.DimLevelType.compressed],
mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
index e134f78c5f57363cf4da898a69026fa4c22c1df7..76dfd3cf145fc1a1571a3a1d4b21a7f0e65e78ec 100644 (file)
@@ -182,11 +182,13 @@ def main():
   # CHECK-LABEL: TEST: test_stress
   print("\nTEST: test_stress")
   with ir.Context() as ctx, ir.Location.unknown():
+    par = 0
+    vec = 0
     vl = 1
     e = False
     sparsification_options = (
-        f'parallelization-strategy=none '
-        f'vectorization-strategy=none '
+        f'parallelization-strategy={par} '
+        f'vectorization-strategy={vec} '
         f'vl={vl} '
         f'enable-simd-index32={e}')
     compiler = sparse_compiler.SparseCompiler(