[REFACTOR] tvm.hybrid -> te.hybrid (#5223)
author Tianqi Chen <tqchen@users.noreply.github.com>
Thu, 2 Apr 2020 23:56:24 +0000 (16:56 -0700)
committer GitHub <noreply@github.com>
Thu, 2 Apr 2020 23:56:24 +0000 (16:56 -0700)
Rationale: The current hybrid module is more closely aligned with the te part.
We might consider adding a new variant of hybrid script that supports the unified IR later.
This refactor paves the way for those potential later changes.
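
For downstream users the change is a mechanical path update. A minimal
sketch of the before/after usage, adapting the outer_product example from
docs/langref/hybrid_script.rst as updated below:

    import tvm
    from tvm import te

    # before this commit: `from tvm import hybrid` and @tvm.hybrid.script
    # after this commit:  the module lives under the te namespace
    from tvm.te import hybrid

    @hybrid.script  # equivalently: @tvm.te.hybrid.script
    def outer_product(a, b):
        c = output_tensor((100, 99), 'float32')  # hybrid-script intrinsic
        for i in range(a.shape[0]):
            for j in range(b.shape[0]):
                c[i, j] = a[i] * b[j]
        return c

    # pass te placeholders to construct the op; pass numpy arrays
    # instead to run the same function under software emulation
    a = te.placeholder((100,), name='a')
    b = te.placeholder((99,), name='b')
    c = outer_product(a, b)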

23 files changed:
docs/api/python/hybrid.rst [deleted file]
docs/api/python/index.rst
docs/api/python/te.rst
docs/langref/hybrid_script.rst
python/tvm/__init__.py
python/tvm/relay/op/_reduce.py
python/tvm/relay/op/_tensor.py
python/tvm/relay/op/_transform.py
python/tvm/relay/op/nn/_nn.py
python/tvm/te/__init__.py
python/tvm/te/hybrid/__init__.py [moved from python/tvm/hybrid/__init__.py with 97% similarity]
python/tvm/te/hybrid/calls.py [moved from python/tvm/hybrid/calls.py with 100% similarity]
python/tvm/te/hybrid/module.py [moved from python/tvm/hybrid/module.py with 97% similarity]
python/tvm/te/hybrid/parser.py [moved from python/tvm/hybrid/parser.py with 100% similarity]
python/tvm/te/hybrid/preprocessor.py [moved from python/tvm/hybrid/preprocessor.py with 100% similarity]
python/tvm/te/hybrid/runtime.py [moved from python/tvm/hybrid/runtime.py with 99% similarity]
python/tvm/te/hybrid/util.py [moved from python/tvm/hybrid/util.py with 100% similarity]
tests/python/unittest/test_te_hybrid_script.py [moved from tests/python/unittest/test_hybrid_script.py with 97% similarity]
tests/python/unittest/test_tir_pass_storage_rewrite.py
topi/python/topi/argwhere.py
topi/python/topi/vision/nms.py
topi/python/topi/vision/ssd/multibox.py
topi/python/topi/x86/roi_align.py

diff --git a/docs/api/python/hybrid.rst b/docs/api/python/hybrid.rst
deleted file mode 100644 (file)
index 1184c83..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-..  Licensed to the Apache Software Foundation (ASF) under one
-    or more contributor license agreements.  See the NOTICE file
-    distributed with this work for additional information
-    regarding copyright ownership.  The ASF licenses this file
-    to you under the Apache License, Version 2.0 (the
-    "License"); you may not use this file except in compliance
-    with the License.  You may obtain a copy of the License at
-
-..    http://www.apache.org/licenses/LICENSE-2.0
-
-..  Unless required by applicable law or agreed to in writing,
-    software distributed under the License is distributed on an
-    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-    KIND, either express or implied.  See the License for the
-    specific language governing permissions and limitations
-    under the License.
-
-tvm.hybrid
-----------
-.. automodule:: tvm.hybrid
-   :members:
-   :imported-members:
-   :autosummary:
index f62a4b8..09da9f1 100644 (file)
@@ -33,7 +33,6 @@ Python API
    rpc
    contrib
    graph_runtime
-   hybrid
    relay/index
    vta/index
    topi
index 363dae6..83e0042 100644 (file)
@@ -23,3 +23,11 @@ tvm.te
    :members:
    :imported-members:
    :autosummary:
+
+
+tvm.te.hybrid
+-------------
+.. automodule:: tvm.te.hybrid
+   :members:
+   :imported-members:
+   :autosummary:
index 82c4f3c..5fae67d 100644 (file)
@@ -33,11 +33,11 @@ Software Emulation
 ~~~~~~~~~~~~~~~~~~
 
 Both software emulation and compilation are supported. To define a function,
-you need to use ``tvm.hybrid.script`` decorator to indicate this is a hybrid function:
+you need to use ``tvm.te.hybrid.script`` decorator to indicate this is a hybrid function:
 
 .. code-block:: python
 
-    @tvm.hybrid.script
+    @tvm.te.hybrid.script
     def outer_product(a, b, c):
         c = output_tensor((100, 99), 'float32')
         for i in range(a.shape[0]):
@@ -85,7 +85,7 @@ to LLVM module.
 Tuning
 ~~~~~~
 
-Follow up the example above, you can use some tvm like interfaces to tune the code: 
+Follow up the example above, you can use some tvm like interfaces to tune the code:
 
 .. code-block:: python
 
@@ -101,7 +101,7 @@ loop manipulation (``split`` and ``fuse``), and ``reorder``.
 
         This is a preliminary function, so users should be in charge of the correctness
         of the functionality after tuning. Specifically, users should be careful when
-        fusing and reorderding imperfect loops. 
+        fusing and reorderding imperfect loops.
 
 Loops
 ~~~~~
@@ -119,7 +119,7 @@ Users can access containers by either constants or constants loops annotated.
 
 .. code-block:: python
 
-   @tvm.hybrid.script
+   @tvm.te.hybrid.script
    def foo(a, b): # b is a tvm.container.Array
        c = output_tensor(a.shape, a.dtype)
        for i in const_range(len(a)): # because you have b access, i should be explicitly annotated as const_range
index 0c4ca13..916708a 100644 (file)
@@ -57,9 +57,6 @@ from . import testing
 # tvm.driver
 from .driver import build, lower
 
-# tvm.hybrid
-from . import hybrid
-
 # others
 from . import arith
 
index ab8b7c2..0eeeb95 100644 (file)
@@ -18,9 +18,9 @@
 from __future__ import absolute_import
 
 from tvm.runtime import convert
+from tvm.te.hybrid import script
 from topi.util import get_const_int, get_const_tuple
 from . import op as _reg
-from ...hybrid import script
 
 _reg.register_reduce_schedule("argmax")
 _reg.register_reduce_schedule("argmin")
index eb35501..f24da05 100644 (file)
 import topi
 
 from tvm.runtime import convert
+from tvm.te.hybrid import script
 from topi.util import get_const_tuple
 from .op import register_compute, register_shape_func
 from .op import register_broadcast_schedule, register_injective_schedule
 from .op import register_pattern, OpPattern
-from ...hybrid import script
 
 
 register_broadcast_schedule("log")
index 1f85e31..ee23fce 100644 (file)
 from __future__ import absolute_import
 import tvm
 from tvm import te
+from tvm.te.hybrid import script
 from tvm.runtime import convert
 import topi
 from topi.util import get_const_int, get_const_tuple
 from . import op as _reg
 from . import strategy
 from .op import OpPattern
-from ...hybrid import script
 
 _reg.register_broadcast_schedule("broadcast_to")
 _reg.register_broadcast_schedule("broadcast_to_like")
index aa35fa2..65a1162 100644 (file)
@@ -22,11 +22,11 @@ import topi
 from topi.util import get_const_tuple
 
 from tvm.runtime import convert
+from tvm.te.hybrid import script
 from .. import op as reg
 from .. import strategy
 from ..op import OpPattern
 from .._tensor import elemwise_shape_func
-from ....hybrid import script
 
 # relu
 reg.register_broadcast_schedule("nn.relu")
index 1ba5549..0016160 100644 (file)
@@ -34,3 +34,4 @@ from .operation import thread_axis, reduce_axis
 
 from .tensor import PlaceholderOp, ComputeOp, TensorComputeOp, ScanOp, ExternOp, HybridOp
 from .autodiff import gradient
+from . import hybrid
similarity index 97%
rename from python/tvm/hybrid/__init__.py
rename to python/tvm/te/hybrid/__init__.py
index 6829496..31acaeb 100644 (file)
@@ -31,8 +31,7 @@ HalideIR.
 import inspect
 import tvm._ffi
 from tvm.driver.build_module import form_body
-
-from .._ffi.base import decorate
+from tvm._ffi.base import decorate
 
 from .module import HybridModule
 from .parser import source_to_op
@@ -95,4 +94,4 @@ def build(sch, inputs, outputs, name="hybrid_func"):
     return HybridModule(src, name)
 
 
-tvm._ffi._init_api("tvm.hybrid")
+tvm._ffi._init_api("tvm.hybrid", __name__)
similarity index 97%
rename from python/tvm/hybrid/module.py
rename to python/tvm/te/hybrid/module.py
index 9811ae1..48b483e 100644 (file)
@@ -23,7 +23,7 @@ To enable this feature, you need to build with -DUSE_HYBRID_DUMP=ON.
 
 import ast
 
-from ..contrib import util
+from tvm.contrib import util
 from .util import _internal_assert
 from .util import _is_tvm_arg_types
 from .parser import source_to_op
@@ -52,7 +52,7 @@ class HybridModule(object):
             temp = util.tempdir()
             dst = temp.relpath("script.py")
             with open(dst, 'w') as f:
-                f.write("import tvm\n@tvm.hybrid.script\n%s" % src)
+                f.write("import tvm\n@tvm.te.hybrid.script\n%s" % src)
 
             if name is not None:
                 self.name = name
similarity index 99%
rename from python/tvm/hybrid/runtime.py
rename to python/tvm/te/hybrid/runtime.py
index 9f92b80..7dcfc7c 100644 (file)
@@ -17,7 +17,7 @@
 """Intrinsics of TVM-Python Hybrid Script for Python emulation runtime"""
 
 import numpy
-from .. import target
+from tvm import target
 
 
 class bind(object): #pylint: disable=invalid-name
similarity index 97%
rename from tests/python/unittest/test_hybrid_script.py
rename to tests/python/unittest/test_te_hybrid_script.py
index 5a56cc3..f1e8967 100644 (file)
@@ -18,8 +18,8 @@ import tvm, inspect, sys, traceback, numpy, pytest, types, os
 
 from tvm import te
 from tvm.contrib import util
-from tvm.hybrid import script
-from tvm.hybrid.runtime import HYBRID_GLOBALS
+from tvm.te.hybrid import script
+from tvm.te.hybrid.runtime import HYBRID_GLOBALS
 
 @pytest.mark.skip
 def run_and_check(func, args, var_dict={}, target='llvm', sch=None, outs=None):
@@ -80,7 +80,7 @@ def run_and_check(func, args, var_dict={}, target='llvm', sch=None, outs=None):
 
     module_args = [i for i in args if isinstance(i, (te.tensor.Tensor, tvm.tir.Var))]
     module_outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
-    h_module = tvm.hybrid.build(sch, module_args, module_outs)
+    h_module = te.hybrid.build(sch, module_args, module_outs)
 
     return h_module, module_args, module_outs
 
@@ -146,7 +146,7 @@ def test_outer_product():
     temp = util.tempdir()
     path = temp.relpath('%s.py' % func.name)
     func.save(path)
-    func_ = tvm.hybrid.HybridModule()
+    func_ = te.hybrid.HybridModule()
     func_.load(path)
     run_and_check(func_, ins, {n: 99, m: 101}, outs=outs)
 
@@ -348,7 +348,7 @@ def test_bind():
     run_and_check(func, ins, outs=outs, target='cuda')
 
 
-    @tvm.hybrid.script
+    @te.hybrid.script
     def foo(a):
         c = output_tensor((a.shape[0],), a.dtype)
         total = allocate((1,), a.dtype, 'local')
@@ -370,7 +370,7 @@ def test_bind():
     func, ins, outs = run_and_check(foo, [a], target='cuda')
     run_and_check(func, ins, outs=outs, target='cuda')
 
-    @tvm.hybrid.script
+    @te.hybrid.script
     def max_threads(a):
         b = output_tensor(a.shape, a.dtype)
         n = a.shape[0]
@@ -433,7 +433,7 @@ def test_math_intrin():
 
 # test non caconical loops
 def test_non_zero():
-    @tvm.hybrid.script
+    @te.hybrid.script
     def blur(a):
         b = output_tensor((30, 30), 'float32')
         for i in range(2, 32):
@@ -449,7 +449,7 @@ def test_non_zero():
     func, ins, outs = run_and_check(blur, [a])
     run_and_check(func, ins, outs=outs)
 
-    @tvm.hybrid.script
+    @te.hybrid.script
     def triangle(a, b):
         c = output_tensor((10, 10), dtype='float32')
         for i in range(10):
@@ -464,7 +464,7 @@ def test_non_zero():
     run_and_check(func, ins, outs=outs)
 
 def test_allocate():
-    @tvm.hybrid.script
+    @te.hybrid.script
     def blur2d(a):
         b = output_tensor((30, 30), 'float32')
         for i in range(30):
@@ -483,7 +483,7 @@ def test_allocate():
     run_and_check(func, ins, outs=outs)
 
     if tvm.gpu().exist:
-        @tvm.hybrid.script
+        @te.hybrid.script
         def share_vec_add(a, b):
             c = output_tensor((256, ), 'float32')
             shared = allocate((256, ), 'float32', 'shared')
@@ -505,7 +505,7 @@ def test_allocate():
         print('[Warning] No GPU found! Skip shared mem test!')
 
 def test_upstream():
-    @tvm.hybrid.script
+    @te.hybrid.script
     def upstream(a):
         b = output_tensor((20, ), 'float32')
         for i in range(20):
@@ -535,7 +535,7 @@ def test_upstream():
     tvm.testing.assert_allclose(tvm_d.asnumpy(), ref, 1e-5, 1e-5)
 
 def test_downstream():
-    @tvm.hybrid.script
+    @te.hybrid.script
     def downstream(a):
         b = output_tensor((20, ), 'float32')
         for i in range(20):
@@ -562,7 +562,7 @@ def test_downstream():
     tvm.testing.assert_allclose(tvm_c.asnumpy(), ref, 1e-5, 1e-5)
 
 def test_const_param():
-    @tvm.hybrid.script
+    @te.hybrid.script
     def add_something(a, b):
         c = output_tensor((11, ), 'int32')
         for i in range(11):
@@ -588,7 +588,7 @@ def test_const_param():
     tvm.testing.assert_allclose(nd_c.asnumpy(), ref, 1e-5, 1e-5)
 
 def test_value_index():
-    @tvm.hybrid.script
+    @te.hybrid.script
     def kernel_a(a):
         b = output_tensor((16, ), 'int32')
         c = output_tensor((4, 4), 'int32')
@@ -597,7 +597,7 @@ def test_value_index():
             c[i // 4, i % 4] = a[i] + 1
         return b, c
 
-    @tvm.hybrid.script
+    @te.hybrid.script
     def kernel_b(b, a):
         c = output_tensor((4, 4), 'int32')
         for i in range(4):
@@ -621,7 +621,7 @@ def test_value_index():
     tvm.testing.assert_allclose(res.asnumpy(), ref)
 
 def test_func_call():
-    @tvm.hybrid.script
+    @te.hybrid.script
     def foo(a, b):
         for i in range(len(a)):
             a[i] = i + 1.0
@@ -640,7 +640,7 @@ def test_func_call():
     run_and_check(func, ins, outs=outs)
 
 def test_bool():
-    @tvm.hybrid.script
+    @te.hybrid.script
     def foo(a):
         b = output_tensor(a.shape, a.dtype)
         b[0] = 1.2
@@ -655,7 +655,7 @@ def test_bool():
     run_and_check(func, ins, outs=outs)
 
 def test_const_range():
-    @tvm.hybrid.script
+    @te.hybrid.script
     def foo(a, b):
         c = output_tensor(a.shape, a.dtype)
         d = output_tensor(a.shape, 'int32')
@@ -675,7 +675,7 @@ def test_const_range():
     func, ins, outs = run_and_check(foo, [a, b])
     run_and_check(func, ins, outs=outs)
 
-    @tvm.hybrid.script
+    @te.hybrid.script
     def goo(a, b):
         c = output_tensor(a.shape, a.dtype)
         len_b = len(b)
@@ -692,7 +692,7 @@ def test_const_range():
     func, ins, outs = run_and_check(goo, [a, b])
     run_and_check(func, ins, outs=outs)
 
-    @tvm.hybrid.script
+    @te.hybrid.script
     def hoo(a, b):
         c = output_tensor(a.shape, a.dtype)
         len_b = len(b)
@@ -779,7 +779,7 @@ def test_capture():
     constant_list = [[1, 2], [3, n]]
     const_value = 1
 
-    @tvm.hybrid.script
+    @te.hybrid.script
     def add_something(a):
         c = output_tensor((constant_tuple[1],), 'int32')
         for i in range(constant_tuple[1]):
index c74225d..b36d86b 100644 (file)
@@ -495,7 +495,7 @@ def test_replace_dataflow():
 
 
 def test_large_input():
-    @tvm.hybrid.script
+    @te.hybrid.script
     def compute(a, b):
         n = 16384
         c = output_tensor((n, n), 'int32')
index 7d8429b..d4bac62 100644 (file)
@@ -16,7 +16,7 @@
 # under the License.
 # pylint: disable=invalid-name, too-many-arguments, too-many-nested-blocks
 """Argwhere operator"""
-from tvm import hybrid
+from tvm.te import hybrid
 
 @hybrid.script
 def hybrid_argwhere_1d(output_shape, condition):
index d95ca75..28598de 100644 (file)
@@ -19,7 +19,7 @@
 import tvm
 from tvm import te
 
-from tvm import hybrid
+from tvm.te import hybrid
 from ..sort import argsort
 
 @hybrid.script
index 8f287b9..ba0cf54 100644 (file)
@@ -18,7 +18,7 @@
 """SSD multibox operators"""
 import tvm
 
-from tvm import hybrid
+from tvm.te import hybrid
 from tvm.tir import exp, sqrt
 
 import topi
index 205d709..e5cfcfe 100644 (file)
@@ -19,7 +19,7 @@
 import math
 import tvm
 
-from tvm import hybrid
+from tvm.te import hybrid
 from ..tensor import full
 from ..util import get_const_tuple