From 6e1cd8256e461deca51e45e7aae33088e8f5b966 Mon Sep 17 00:00:00 2001
From: Tianqi Chen
Date: Thu, 2 Apr 2020 16:56:24 -0700
Subject: [PATCH] [REFACTOR] tvm.hybrid -> te.hybrid (#5223)

Rationale: The current hybrid module is more closely aligned with the te
part. We might consider adding a new variant of hybrid script that supports
the unified IR later. This refactor paves the way for those potential
changes.
---
 docs/api/python/hybrid.rst                         | 23 ------------
 docs/api/python/index.rst                          |  1 -
 docs/api/python/te.rst                             |  8 +++++
 docs/langref/hybrid_script.rst                     | 10 +++---
 python/tvm/__init__.py                             |  3 --
 python/tvm/relay/op/_reduce.py                     |  2 +-
 python/tvm/relay/op/_tensor.py                     |  2 +-
 python/tvm/relay/op/_transform.py                  |  2 +-
 python/tvm/relay/op/nn/_nn.py                      |  2 +-
 python/tvm/te/__init__.py                          |  1 +
 python/tvm/{ => te}/hybrid/__init__.py             |  5 ++-
 python/tvm/{ => te}/hybrid/calls.py                |  0
 python/tvm/{ => te}/hybrid/module.py               |  4 +--
 python/tvm/{ => te}/hybrid/parser.py               |  0
 python/tvm/{ => te}/hybrid/preprocessor.py         |  0
 python/tvm/{ => te}/hybrid/runtime.py              |  2 +-
 python/tvm/{ => te}/hybrid/util.py                 |  0
 ...t_hybrid_script.py => test_te_hybrid_script.py} | 42 +++++++++++-----------
 .../unittest/test_tir_pass_storage_rewrite.py      |  2 +-
 topi/python/topi/argwhere.py                       |  2 +-
 topi/python/topi/vision/nms.py                     |  2 +-
 topi/python/topi/vision/ssd/multibox.py            |  2 +-
 topi/python/topi/x86/roi_align.py                  |  2 +-
 23 files changed, 49 insertions(+), 68 deletions(-)
 delete mode 100644 docs/api/python/hybrid.rst
 rename python/tvm/{ => te}/hybrid/__init__.py (97%)
 rename python/tvm/{ => te}/hybrid/calls.py (100%)
 rename python/tvm/{ => te}/hybrid/module.py (97%)
 rename python/tvm/{ => te}/hybrid/parser.py (100%)
 rename python/tvm/{ => te}/hybrid/preprocessor.py (100%)
 rename python/tvm/{ => te}/hybrid/runtime.py (99%)
 rename python/tvm/{ => te}/hybrid/util.py (100%)
 rename tests/python/unittest/{test_hybrid_script.py => test_te_hybrid_script.py} (97%)

diff --git a/docs/api/python/hybrid.rst b/docs/api/python/hybrid.rst
deleted file mode 100644
index 1184c83..0000000
--- a/docs/api/python/hybrid.rst
+++ /dev/null
@@ -1,23 +0,0 @@
-.. Licensed to the Apache Software Foundation (ASF) under one
-   or more contributor license agreements.  See the NOTICE file
-   distributed with this work for additional information
-   regarding copyright ownership.  The ASF licenses this file
-   to you under the Apache License, Version 2.0 (the
-   "License"); you may not use this file except in compliance
-   with the License.  You may obtain a copy of the License at
-
-..   http://www.apache.org/licenses/LICENSE-2.0
-
-.. Unless required by applicable law or agreed to in writing,
-   software distributed under the License is distributed on an
-   "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-   KIND, either express or implied.  See the License for the
-   specific language governing permissions and limitations
-   under the License.
-
-tvm.hybrid
-----------
-.. automodule:: tvm.hybrid
-   :members:
-   :imported-members:
-   :autosummary:
diff --git a/docs/api/python/index.rst b/docs/api/python/index.rst
index f62a4b8..09da9f1 100644
--- a/docs/api/python/index.rst
+++ b/docs/api/python/index.rst
@@ -33,7 +33,6 @@ Python API
    rpc
    contrib
    graph_runtime
-   hybrid
    relay/index
    vta/index
    topi
diff --git a/docs/api/python/te.rst b/docs/api/python/te.rst
index 363dae6..83e0042 100644
--- a/docs/api/python/te.rst
+++ b/docs/api/python/te.rst
@@ -23,3 +23,11 @@ tvm.te
    :members:
    :imported-members:
    :autosummary:
+
+
+tvm.te.hybrid
+-------------
+.. 
automodule:: tvm.te.hybrid + :members: + :imported-members: + :autosummary: diff --git a/docs/langref/hybrid_script.rst b/docs/langref/hybrid_script.rst index 82c4f3c..5fae67d 100644 --- a/docs/langref/hybrid_script.rst +++ b/docs/langref/hybrid_script.rst @@ -33,11 +33,11 @@ Software Emulation ~~~~~~~~~~~~~~~~~~ Both software emulation and compilation are supported. To define a function, -you need to use ``tvm.hybrid.script`` decorator to indicate this is a hybrid function: +you need to use ``tvm.te.hybrid.script`` decorator to indicate this is a hybrid function: .. code-block:: python - @tvm.hybrid.script + @tvm.te.hybrid.script def outer_product(a, b, c): c = output_tensor((100, 99), 'float32') for i in range(a.shape[0]): @@ -85,7 +85,7 @@ to LLVM module. Tuning ~~~~~~ -Follow up the example above, you can use some tvm like interfaces to tune the code: +Follow up the example above, you can use some tvm like interfaces to tune the code: .. code-block:: python @@ -101,7 +101,7 @@ loop manipulation (``split`` and ``fuse``), and ``reorder``. This is a preliminary function, so users should be in charge of the correctness of the functionality after tuning. Specifically, users should be careful when - fusing and reorderding imperfect loops. + fusing and reorderding imperfect loops. Loops ~~~~~ @@ -119,7 +119,7 @@ Users can access containers by either constants or constants loops annotated. .. code-block:: python - @tvm.hybrid.script + @tvm.te.hybrid.script def foo(a, b): # b is a tvm.container.Array c = output_tensor(a.shape, a.dtype) for i in const_range(len(a)): # because you have b access, i should be explicitly annotated as const_range diff --git a/python/tvm/__init__.py b/python/tvm/__init__.py index 0c4ca13..916708a 100644 --- a/python/tvm/__init__.py +++ b/python/tvm/__init__.py @@ -57,9 +57,6 @@ from . import testing # tvm.driver from .driver import build, lower -# tvm.hybrid -from . import hybrid - # others from . import arith diff --git a/python/tvm/relay/op/_reduce.py b/python/tvm/relay/op/_reduce.py index ab8b7c2..0eeeb95 100644 --- a/python/tvm/relay/op/_reduce.py +++ b/python/tvm/relay/op/_reduce.py @@ -18,9 +18,9 @@ from __future__ import absolute_import from tvm.runtime import convert +from tvm.te.hybrid import script from topi.util import get_const_int, get_const_tuple from . import op as _reg -from ...hybrid import script _reg.register_reduce_schedule("argmax") _reg.register_reduce_schedule("argmin") diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py index eb35501..f24da05 100644 --- a/python/tvm/relay/op/_tensor.py +++ b/python/tvm/relay/op/_tensor.py @@ -19,11 +19,11 @@ import topi from tvm.runtime import convert +from tvm.te.hybrid import script from topi.util import get_const_tuple from .op import register_compute, register_shape_func from .op import register_broadcast_schedule, register_injective_schedule from .op import register_pattern, OpPattern -from ...hybrid import script register_broadcast_schedule("log") diff --git a/python/tvm/relay/op/_transform.py b/python/tvm/relay/op/_transform.py index 1f85e31..ee23fce 100644 --- a/python/tvm/relay/op/_transform.py +++ b/python/tvm/relay/op/_transform.py @@ -19,13 +19,13 @@ from __future__ import absolute_import import tvm from tvm import te +from tvm.te.hybrid import script from tvm.runtime import convert import topi from topi.util import get_const_int, get_const_tuple from . import op as _reg from . 
import strategy from .op import OpPattern -from ...hybrid import script _reg.register_broadcast_schedule("broadcast_to") _reg.register_broadcast_schedule("broadcast_to_like") diff --git a/python/tvm/relay/op/nn/_nn.py b/python/tvm/relay/op/nn/_nn.py index aa35fa2..65a1162 100644 --- a/python/tvm/relay/op/nn/_nn.py +++ b/python/tvm/relay/op/nn/_nn.py @@ -22,11 +22,11 @@ import topi from topi.util import get_const_tuple from tvm.runtime import convert +from tvm.te.hybrid import script from .. import op as reg from .. import strategy from ..op import OpPattern from .._tensor import elemwise_shape_func -from ....hybrid import script # relu reg.register_broadcast_schedule("nn.relu") diff --git a/python/tvm/te/__init__.py b/python/tvm/te/__init__.py index 1ba5549..0016160 100644 --- a/python/tvm/te/__init__.py +++ b/python/tvm/te/__init__.py @@ -34,3 +34,4 @@ from .operation import thread_axis, reduce_axis from .tensor import PlaceholderOp, ComputeOp, TensorComputeOp, ScanOp, ExternOp, HybridOp from .autodiff import gradient +from . import hybrid diff --git a/python/tvm/hybrid/__init__.py b/python/tvm/te/hybrid/__init__.py similarity index 97% rename from python/tvm/hybrid/__init__.py rename to python/tvm/te/hybrid/__init__.py index 6829496..31acaeb 100644 --- a/python/tvm/hybrid/__init__.py +++ b/python/tvm/te/hybrid/__init__.py @@ -31,8 +31,7 @@ HalideIR. import inspect import tvm._ffi from tvm.driver.build_module import form_body - -from .._ffi.base import decorate +from tvm._ffi.base import decorate from .module import HybridModule from .parser import source_to_op @@ -95,4 +94,4 @@ def build(sch, inputs, outputs, name="hybrid_func"): return HybridModule(src, name) -tvm._ffi._init_api("tvm.hybrid") +tvm._ffi._init_api("tvm.hybrid", __name__) diff --git a/python/tvm/hybrid/calls.py b/python/tvm/te/hybrid/calls.py similarity index 100% rename from python/tvm/hybrid/calls.py rename to python/tvm/te/hybrid/calls.py diff --git a/python/tvm/hybrid/module.py b/python/tvm/te/hybrid/module.py similarity index 97% rename from python/tvm/hybrid/module.py rename to python/tvm/te/hybrid/module.py index 9811ae1b..48b483e 100644 --- a/python/tvm/hybrid/module.py +++ b/python/tvm/te/hybrid/module.py @@ -23,7 +23,7 @@ To enable this feature, you need to build with -DUSE_HYBRID_DUMP=ON. import ast -from ..contrib import util +from tvm.contrib import util from .util import _internal_assert from .util import _is_tvm_arg_types from .parser import source_to_op @@ -52,7 +52,7 @@ class HybridModule(object): temp = util.tempdir() dst = temp.relpath("script.py") with open(dst, 'w') as f: - f.write("import tvm\n@tvm.hybrid.script\n%s" % src) + f.write("import tvm\n@tvm.te.hybrid.script\n%s" % src) if name is not None: self.name = name diff --git a/python/tvm/hybrid/parser.py b/python/tvm/te/hybrid/parser.py similarity index 100% rename from python/tvm/hybrid/parser.py rename to python/tvm/te/hybrid/parser.py diff --git a/python/tvm/hybrid/preprocessor.py b/python/tvm/te/hybrid/preprocessor.py similarity index 100% rename from python/tvm/hybrid/preprocessor.py rename to python/tvm/te/hybrid/preprocessor.py diff --git a/python/tvm/hybrid/runtime.py b/python/tvm/te/hybrid/runtime.py similarity index 99% rename from python/tvm/hybrid/runtime.py rename to python/tvm/te/hybrid/runtime.py index 9f92b80..7dcfc7c 100644 --- a/python/tvm/hybrid/runtime.py +++ b/python/tvm/te/hybrid/runtime.py @@ -17,7 +17,7 @@ """Intrinsics of TVM-Python Hybrid Script for Python emulation runtime""" import numpy -from .. 
import target +from tvm import target class bind(object): #pylint: disable=invalid-name diff --git a/python/tvm/hybrid/util.py b/python/tvm/te/hybrid/util.py similarity index 100% rename from python/tvm/hybrid/util.py rename to python/tvm/te/hybrid/util.py diff --git a/tests/python/unittest/test_hybrid_script.py b/tests/python/unittest/test_te_hybrid_script.py similarity index 97% rename from tests/python/unittest/test_hybrid_script.py rename to tests/python/unittest/test_te_hybrid_script.py index 5a56cc3..f1e8967 100644 --- a/tests/python/unittest/test_hybrid_script.py +++ b/tests/python/unittest/test_te_hybrid_script.py @@ -18,8 +18,8 @@ import tvm, inspect, sys, traceback, numpy, pytest, types, os from tvm import te from tvm.contrib import util -from tvm.hybrid import script -from tvm.hybrid.runtime import HYBRID_GLOBALS +from tvm.te.hybrid import script +from tvm.te.hybrid.runtime import HYBRID_GLOBALS @pytest.mark.skip def run_and_check(func, args, var_dict={}, target='llvm', sch=None, outs=None): @@ -80,7 +80,7 @@ def run_and_check(func, args, var_dict={}, target='llvm', sch=None, outs=None): module_args = [i for i in args if isinstance(i, (te.tensor.Tensor, tvm.tir.Var))] module_outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs - h_module = tvm.hybrid.build(sch, module_args, module_outs) + h_module = te.hybrid.build(sch, module_args, module_outs) return h_module, module_args, module_outs @@ -146,7 +146,7 @@ def test_outer_product(): temp = util.tempdir() path = temp.relpath('%s.py' % func.name) func.save(path) - func_ = tvm.hybrid.HybridModule() + func_ = te.hybrid.HybridModule() func_.load(path) run_and_check(func_, ins, {n: 99, m: 101}, outs=outs) @@ -348,7 +348,7 @@ def test_bind(): run_and_check(func, ins, outs=outs, target='cuda') - @tvm.hybrid.script + @te.hybrid.script def foo(a): c = output_tensor((a.shape[0],), a.dtype) total = allocate((1,), a.dtype, 'local') @@ -370,7 +370,7 @@ def test_bind(): func, ins, outs = run_and_check(foo, [a], target='cuda') run_and_check(func, ins, outs=outs, target='cuda') - @tvm.hybrid.script + @te.hybrid.script def max_threads(a): b = output_tensor(a.shape, a.dtype) n = a.shape[0] @@ -433,7 +433,7 @@ def test_math_intrin(): # test non caconical loops def test_non_zero(): - @tvm.hybrid.script + @te.hybrid.script def blur(a): b = output_tensor((30, 30), 'float32') for i in range(2, 32): @@ -449,7 +449,7 @@ def test_non_zero(): func, ins, outs = run_and_check(blur, [a]) run_and_check(func, ins, outs=outs) - @tvm.hybrid.script + @te.hybrid.script def triangle(a, b): c = output_tensor((10, 10), dtype='float32') for i in range(10): @@ -464,7 +464,7 @@ def test_non_zero(): run_and_check(func, ins, outs=outs) def test_allocate(): - @tvm.hybrid.script + @te.hybrid.script def blur2d(a): b = output_tensor((30, 30), 'float32') for i in range(30): @@ -483,7 +483,7 @@ def test_allocate(): run_and_check(func, ins, outs=outs) if tvm.gpu().exist: - @tvm.hybrid.script + @te.hybrid.script def share_vec_add(a, b): c = output_tensor((256, ), 'float32') shared = allocate((256, ), 'float32', 'shared') @@ -505,7 +505,7 @@ def test_allocate(): print('[Warning] No GPU found! 
Skip shared mem test!') def test_upstream(): - @tvm.hybrid.script + @te.hybrid.script def upstream(a): b = output_tensor((20, ), 'float32') for i in range(20): @@ -535,7 +535,7 @@ def test_upstream(): tvm.testing.assert_allclose(tvm_d.asnumpy(), ref, 1e-5, 1e-5) def test_downstream(): - @tvm.hybrid.script + @te.hybrid.script def downstream(a): b = output_tensor((20, ), 'float32') for i in range(20): @@ -562,7 +562,7 @@ def test_downstream(): tvm.testing.assert_allclose(tvm_c.asnumpy(), ref, 1e-5, 1e-5) def test_const_param(): - @tvm.hybrid.script + @te.hybrid.script def add_something(a, b): c = output_tensor((11, ), 'int32') for i in range(11): @@ -588,7 +588,7 @@ def test_const_param(): tvm.testing.assert_allclose(nd_c.asnumpy(), ref, 1e-5, 1e-5) def test_value_index(): - @tvm.hybrid.script + @te.hybrid.script def kernel_a(a): b = output_tensor((16, ), 'int32') c = output_tensor((4, 4), 'int32') @@ -597,7 +597,7 @@ def test_value_index(): c[i // 4, i % 4] = a[i] + 1 return b, c - @tvm.hybrid.script + @te.hybrid.script def kernel_b(b, a): c = output_tensor((4, 4), 'int32') for i in range(4): @@ -621,7 +621,7 @@ def test_value_index(): tvm.testing.assert_allclose(res.asnumpy(), ref) def test_func_call(): - @tvm.hybrid.script + @te.hybrid.script def foo(a, b): for i in range(len(a)): a[i] = i + 1.0 @@ -640,7 +640,7 @@ def test_func_call(): run_and_check(func, ins, outs=outs) def test_bool(): - @tvm.hybrid.script + @te.hybrid.script def foo(a): b = output_tensor(a.shape, a.dtype) b[0] = 1.2 @@ -655,7 +655,7 @@ def test_bool(): run_and_check(func, ins, outs=outs) def test_const_range(): - @tvm.hybrid.script + @te.hybrid.script def foo(a, b): c = output_tensor(a.shape, a.dtype) d = output_tensor(a.shape, 'int32') @@ -675,7 +675,7 @@ def test_const_range(): func, ins, outs = run_and_check(foo, [a, b]) run_and_check(func, ins, outs=outs) - @tvm.hybrid.script + @te.hybrid.script def goo(a, b): c = output_tensor(a.shape, a.dtype) len_b = len(b) @@ -692,7 +692,7 @@ def test_const_range(): func, ins, outs = run_and_check(goo, [a, b]) run_and_check(func, ins, outs=outs) - @tvm.hybrid.script + @te.hybrid.script def hoo(a, b): c = output_tensor(a.shape, a.dtype) len_b = len(b) @@ -779,7 +779,7 @@ def test_capture(): constant_list = [[1, 2], [3, n]] const_value = 1 - @tvm.hybrid.script + @te.hybrid.script def add_something(a): c = output_tensor((constant_tuple[1],), 'int32') for i in range(constant_tuple[1]): diff --git a/tests/python/unittest/test_tir_pass_storage_rewrite.py b/tests/python/unittest/test_tir_pass_storage_rewrite.py index c74225d..b36d86b 100644 --- a/tests/python/unittest/test_tir_pass_storage_rewrite.py +++ b/tests/python/unittest/test_tir_pass_storage_rewrite.py @@ -495,7 +495,7 @@ def test_replace_dataflow(): def test_large_input(): - @tvm.hybrid.script + @te.hybrid.script def compute(a, b): n = 16384 c = output_tensor((n, n), 'int32') diff --git a/topi/python/topi/argwhere.py b/topi/python/topi/argwhere.py index 7d8429b..d4bac62 100644 --- a/topi/python/topi/argwhere.py +++ b/topi/python/topi/argwhere.py @@ -16,7 +16,7 @@ # under the License. 
# pylint: disable=invalid-name, too-many-arguments, too-many-nested-blocks """Argwhere operator""" -from tvm import hybrid +from tvm.te import hybrid @hybrid.script def hybrid_argwhere_1d(output_shape, condition): diff --git a/topi/python/topi/vision/nms.py b/topi/python/topi/vision/nms.py index d95ca75..28598de 100644 --- a/topi/python/topi/vision/nms.py +++ b/topi/python/topi/vision/nms.py @@ -19,7 +19,7 @@ import tvm from tvm import te -from tvm import hybrid +from tvm.te import hybrid from ..sort import argsort @hybrid.script diff --git a/topi/python/topi/vision/ssd/multibox.py b/topi/python/topi/vision/ssd/multibox.py index 8f287b9..ba0cf54 100644 --- a/topi/python/topi/vision/ssd/multibox.py +++ b/topi/python/topi/vision/ssd/multibox.py @@ -18,7 +18,7 @@ """SSD multibox operators""" import tvm -from tvm import hybrid +from tvm.te import hybrid from tvm.tir import exp, sqrt import topi diff --git a/topi/python/topi/x86/roi_align.py b/topi/python/topi/x86/roi_align.py index 205d709..e5cfcfe 100644 --- a/topi/python/topi/x86/roi_align.py +++ b/topi/python/topi/x86/roi_align.py @@ -19,7 +19,7 @@ import math import tvm -from tvm import hybrid +from tvm.te import hybrid from ..tensor import full from ..util import get_const_tuple -- 2.7.4
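
A minimal usage sketch of the renamed entry point, assuming this patch is
applied; it adapts the outer_product example from
docs/langref/hybrid_script.rst. The shapes, tensor names, and the final
lowering step are illustrative, not part of the patch.

import numpy
import tvm
from tvm import te

# The decorator now lives under tvm.te.hybrid (previously tvm.hybrid).
@tvm.te.hybrid.script
def outer_product(a, b):
    # output_tensor is a hybrid-script intrinsic injected by the parser.
    c = output_tensor((a.shape[0], b.shape[0]), 'float32')
    for i in range(a.shape[0]):
        for j in range(b.shape[0]):
            c[i, j] = a[i] * b[j]
    return c

# Software emulation: plain numpy arguments execute the body directly.
a = numpy.random.rand(100).astype('float32')
b = numpy.random.rand(99).astype('float32')
c = outer_product(a, b)

# Compilation: te.placeholder arguments trace the body into a HybridOp.
A = te.placeholder((100,), dtype='float32', name='A')
B = te.placeholder((99,), dtype='float32', name='B')
C = outer_product(A, B)
s = te.create_schedule(C.op)
print(tvm.lower(s, [A, B, C], simple_mode=True))

Only the import path changes for callers; te.hybrid.build and HybridModule
keep their previous behavior, except that saved modules now emit a
@tvm.te.hybrid.script header, per the module.py hunk above.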