From: Aart Bik
Date: Fri, 15 Oct 2021 17:07:45 +0000 (-0700)
Subject: [mlir][sparse] run less combinations of SpMM in test (to reduce runtime)
X-Git-Tag: upstream/15.0.7~28471
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=e9b1c974be272ca51800cff2cd561f9e53eb127e;p=platform%2Fupstream%2Fllvm.git

[mlir][sparse] run less combinations of SpMM in test (to reduce runtime)

This revision also adds a few passes to the sparse compiler part to
unify the transformation sequence with all other paths we currently use.

Reviewed By: nicolasvasilache

Differential Revision: https://reviews.llvm.org/D111900
---

diff --git a/mlir/test/python/dialects/sparse_tensor/test_SpMM.py b/mlir/test/python/dialects/sparse_tensor/test_SpMM.py
index f5f6d8b..38ca333 100644
--- a/mlir/test/python/dialects/sparse_tensor/test_SpMM.py
+++ b/mlir/test/python/dialects/sparse_tensor/test_SpMM.py
@@ -116,14 +116,16 @@ class SparseCompiler:
 
   def __init__(self, options: str):
     pipeline = (
+        f'builtin.func(linalg-generalize-named-ops,linalg-fuse-elementwise-ops),'
         f'sparsification{{{options}}},'
         f'sparse-tensor-conversion,'
-        f'builtin.func(convert-linalg-to-loops,convert-vector-to-scf),'
+        f'builtin.func(linalg-bufferize,convert-linalg-to-loops,convert-vector-to-scf),'
         f'convert-scf-to-std,'
         f'func-bufferize,'
         f'tensor-constant-bufferize,'
         f'builtin.func(tensor-bufferize,std-bufferize,finalizing-bufferize),'
         f'convert-vector-to-llvm{{reassociate-fp-reductions=1 enable-index-optimizations=1}},'
+        f'lower-affine,'
         f'convert-memref-to-llvm,'
         f'convert-std-to-llvm,'
         f'reconcile-unrealized-casts')
@@ -134,7 +136,7 @@ class SparseCompiler:
 
 
 # CHECK-LABEL: TEST: testSpMM
-# CHECK: Passed 72 tests
+# CHECK: Passed 8 tests
 @run
 def testSpMM():
   # Obtain path to runtime support library.
@@ -143,8 +145,10 @@ def testSpMM():
 
   with ir.Context() as ctx, ir.Location.unknown():
     count = 0
-    # Fixed compiler optimization strategy.
-    # TODO: explore state space here too
+    # Loop over various ways to compile and annotate the SpMM kernel with
+    # a *single* sparse tensor. Note that we deliberate do not exhaustively
+    # search the full state space to reduce runtime of the test. It is
+    # straightforward to adapt the code below to explore more combinations.
     par = 0
     vec = 0
     vl = 1
@@ -152,9 +156,6 @@ def testSpMM():
     opt = (f'parallelization-strategy={par} '
            f'vectorization-strategy={vec} '
            f'vl={vl} enable-simd-index32={e}')
-    # Exhaustive loop over various ways to annotate a kernel with
-    # a *single* sparse tensor. Even this subset already gives
-    # quite a large state space!
     levels = [[st.DimLevelType.dense, st.DimLevelType.dense],
               [st.DimLevelType.dense, st.DimLevelType.compressed],
               [st.DimLevelType.compressed, st.DimLevelType.dense],
@@ -163,7 +164,7 @@
         ir.AffineMap.get_permutation([0, 1]),
         ir.AffineMap.get_permutation([1, 0])
     ]
-    bitwidths = [0, 8, 32]
+    bitwidths = [0]
     for level in levels:
       for ordering in orderings:
         for pwidth in bitwidths:
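
For reference, below is a minimal sketch (not part of the patch) of how the updated
pass pipeline from this change can be assembled and applied through the MLIR Python
bindings of this era. The pipeline string and the PassManager usage mirror the
SparseCompiler helper modified in the diff above; the helper names
build_sparse_compiler_pipeline and compile_spmm are illustrative only.

from mlir import ir
from mlir import passmanager


def build_sparse_compiler_pipeline(options: str) -> str:
  """Returns the textual pass pipeline after this revision.

  `options` is the sparsification option string, e.g.
  'parallelization-strategy=0 vectorization-strategy=0 vl=1 enable-simd-index32=False'.
  """
  return (
      f'builtin.func(linalg-generalize-named-ops,linalg-fuse-elementwise-ops),'
      f'sparsification{{{options}}},'
      f'sparse-tensor-conversion,'
      f'builtin.func(linalg-bufferize,convert-linalg-to-loops,convert-vector-to-scf),'
      f'convert-scf-to-std,'
      f'func-bufferize,'
      f'tensor-constant-bufferize,'
      f'builtin.func(tensor-bufferize,std-bufferize,finalizing-bufferize),'
      f'convert-vector-to-llvm{{reassociate-fp-reductions=1 enable-index-optimizations=1}},'
      f'lower-affine,'
      f'convert-memref-to-llvm,'
      f'convert-std-to-llvm,'
      f'reconcile-unrealized-casts')


def compile_spmm(module: ir.Module, options: str):
  """Lowers a module containing the annotated SpMM kernel to the LLVM dialect."""
  pipeline = build_sparse_compiler_pipeline(options)
  with module.context:
    # Parse the textual pipeline into a pass manager and run it on the module
    # in place, as the SparseCompiler helper in test_SpMM.py does.
    passmanager.PassManager.parse(pipeline).run(module)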