[REFACTOR] topi -> tvm/topi (#6186)
author	Tianqi Chen <tqchen@users.noreply.github.com>
Sun, 2 Aug 2020 16:29:29 +0000 (09:29 -0700)
committer	GitHub <noreply@github.com>
Sun, 2 Aug 2020 16:29:29 +0000 (09:29 -0700)
This PR migrates the topi library into the tvm package as the sub-namespace tvm.topi.
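
For downstream code the change is a pure import-path move. A minimal before/after sketch (the placeholder shape and the relu/get_const_tuple calls are illustrative examples, not taken from this PR):

    # before this PR: topi was a standalone package on its own PYTHONPATH entry
    #   import topi
    #   from topi.util import get_const_tuple

    # after this PR: topi ships inside the tvm package as tvm.topi
    from tvm import te, topi
    from tvm.topi.util import get_const_tuple

    A = te.placeholder((1, 3, 224, 224), name="A")
    B = topi.nn.relu(A)                    # same operator API, new namespace
    print(get_const_tuple(B.shape))        # -> (1, 3, 224, 224)

The C++ headers move the same way: #include <topi/nn.h> becomes #include <tvm/topi/nn.h>, matching the include/tvm/topi paths in the file list below.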

486 files changed:
CMakeLists.txt
Makefile
apps/android_camera/app/src/main/jni/Android.mk
apps/android_deploy/app/src/main/jni/Android.mk
apps/android_rpc/app/src/main/jni/Android.mk
apps/sgx/build.rs
apps/topi_recipe/README.md [moved from topi/README.md with 86% similarity]
apps/topi_recipe/broadcast/test_broadcast_map.py [moved from topi/recipe/broadcast/test_broadcast_map.py with 99% similarity]
apps/topi_recipe/conv/depthwise_conv2d_test.py [moved from topi/recipe/conv/depthwise_conv2d_test.py with 95% similarity]
apps/topi_recipe/conv/test_conv2d_hwcn_map.py [moved from topi/recipe/conv/test_conv2d_hwcn_map.py with 95% similarity]
apps/topi_recipe/conv/test_conv_int8_arm.py [moved from topi/recipe/conv/test_conv_int8_arm.py with 99% similarity]
apps/topi_recipe/conv/test_conv_int8_intel.py [moved from topi/recipe/conv/test_conv_int8_intel.py with 99% similarity]
apps/topi_recipe/gemm/android_gemm_square.py [moved from topi/recipe/gemm/android_gemm_square.py with 100% similarity]
apps/topi_recipe/gemm/cuda_gemm_square.py [moved from topi/recipe/gemm/cuda_gemm_square.py with 100% similarity]
apps/topi_recipe/gemm/gemm_int8.py [moved from topi/recipe/gemm/gemm_int8.py with 99% similarity]
apps/topi_recipe/reduce/test_reduce_map.py [moved from topi/recipe/reduce/test_reduce_map.py with 99% similarity]
apps/topi_recipe/rnn/lstm.py [moved from topi/recipe/rnn/lstm.py with 100% similarity]
apps/topi_recipe/rnn/matexp.py [moved from topi/recipe/rnn/matexp.py with 100% similarity]
conda/tvm/build.sh
docker/Dockerfile.demo_android
docker/Dockerfile.demo_cpu
docker/Dockerfile.demo_gpu
docker/Dockerfile.demo_opencl
docker/bash.sh
docs/Doxyfile
docs/api/python/index.rst
docs/api/python/topi.rst
docs/conf.py
docs/contribute/pull_request.rst
docs/dev/codebase_walkthrough.rst
docs/dev/index.rst
docs/install/from_source.rst
docs/langref/relay_expr.rst
include/tvm/topi/broadcast.h [moved from topi/include/topi/broadcast.h with 98% similarity]
include/tvm/topi/contrib/cublas.h [moved from topi/include/topi/contrib/cublas.h with 93% similarity]
include/tvm/topi/contrib/rocblas.h [moved from topi/include/topi/contrib/rocblas.h with 90% similarity]
include/tvm/topi/cuda/dense.h [moved from topi/include/topi/cuda/dense.h with 93% similarity]
include/tvm/topi/cuda/injective.h [moved from topi/include/topi/cuda/injective.h with 91% similarity]
include/tvm/topi/cuda/normalization.h [moved from topi/include/topi/cuda/normalization.h with 93% similarity]
include/tvm/topi/cuda/pooling.h [moved from topi/include/topi/cuda/pooling.h with 95% similarity]
include/tvm/topi/cuda/reduction.h [moved from topi/include/topi/cuda/reduction.h with 96% similarity]
include/tvm/topi/cuda/softmax.h [moved from topi/include/topi/cuda/softmax.h with 93% similarity]
include/tvm/topi/detail/array_utils.h [moved from topi/include/topi/detail/array_utils.h with 89% similarity]
include/tvm/topi/detail/broadcast.h [moved from topi/include/topi/detail/broadcast.h with 96% similarity]
include/tvm/topi/detail/constant_utils.h [moved from topi/include/topi/detail/constant_utils.h with 95% similarity]
include/tvm/topi/detail/extern.h [moved from topi/include/topi/detail/extern.h with 97% similarity]
include/tvm/topi/detail/fuse.h [moved from topi/include/topi/detail/fuse.h with 90% similarity]
include/tvm/topi/detail/pad_utils.h [moved from topi/include/topi/detail/pad_utils.h with 91% similarity]
include/tvm/topi/detail/ravel_unravel.h [moved from topi/include/topi/detail/ravel_unravel.h with 92% similarity]
include/tvm/topi/detail/tensor_utils.h [moved from topi/include/topi/detail/tensor_utils.h with 94% similarity]
include/tvm/topi/elemwise.h [moved from topi/include/topi/elemwise.h with 99% similarity]
include/tvm/topi/generic/default.h [moved from topi/include/topi/generic/default.h with 91% similarity]
include/tvm/topi/generic/extern.h [moved from topi/include/topi/generic/extern.h with 87% similarity]
include/tvm/topi/generic/injective.h [moved from topi/include/topi/generic/injective.h with 90% similarity]
include/tvm/topi/nn.h [moved from topi/include/topi/nn.h with 99% similarity]
include/tvm/topi/nn/batch_matmul.h [moved from topi/include/topi/nn/batch_matmul.h with 91% similarity]
include/tvm/topi/nn/bias_add.h [moved from topi/include/topi/nn/bias_add.h with 87% similarity]
include/tvm/topi/nn/bnn.h [moved from topi/include/topi/nn/bnn.h with 95% similarity]
include/tvm/topi/nn/dense.h [moved from topi/include/topi/nn/dense.h with 93% similarity]
include/tvm/topi/nn/dilate.h [moved from topi/include/topi/nn/dilate.h with 95% similarity]
include/tvm/topi/nn/flatten.h [moved from topi/include/topi/nn/flatten.h with 91% similarity]
include/tvm/topi/nn/local_response_norm.h [moved from topi/include/topi/nn/local_response_norm.h with 93% similarity]
include/tvm/topi/nn/mapping.h [moved from topi/include/topi/nn/mapping.h with 93% similarity]
include/tvm/topi/nn/pooling.h [moved from topi/include/topi/nn/pooling.h with 99% similarity]
include/tvm/topi/nn/softmax.h [moved from topi/include/topi/nn/softmax.h with 96% similarity]
include/tvm/topi/reduction.h [moved from topi/include/topi/reduction.h with 98% similarity]
include/tvm/topi/rocm/dense.h [moved from topi/include/topi/rocm/dense.h with 89% similarity]
include/tvm/topi/rocm/injective.h [moved from topi/include/topi/rocm/injective.h with 88% similarity]
include/tvm/topi/rocm/normalization.h [moved from topi/include/topi/rocm/normalization.h with 87% similarity]
include/tvm/topi/rocm/pooling.h [moved from topi/include/topi/rocm/pooling.h with 86% similarity]
include/tvm/topi/rocm/reduction.h [moved from topi/include/topi/rocm/reduction.h with 85% similarity]
include/tvm/topi/rocm/softmax.h [moved from topi/include/topi/rocm/softmax.h with 85% similarity]
include/tvm/topi/tags.h [moved from topi/include/topi/tags.h with 94% similarity]
include/tvm/topi/transform.h [moved from topi/include/topi/transform.h with 99% similarity]
include/tvm/topi/util.h [moved from topi/include/topi/util.h with 92% similarity]
include/tvm/topi/vision/reorg.h [moved from topi/include/topi/vision/reorg.h with 89% similarity]
include/tvm/topi/x86/bnn.h [moved from topi/include/topi/x86/bnn.h with 94% similarity]
include/tvm/topi/x86/default.h [moved from topi/include/topi/x86/default.h with 93% similarity]
include/tvm/topi/x86/injective.h [moved from topi/include/topi/x86/injective.h with 91% similarity]
python/tvm/autotvm/graph_tuner/base_graph_tuner.py
python/tvm/autotvm/task/relay_integration.py
python/tvm/relay/frontend/common.py
python/tvm/relay/frontend/mxnet.py
python/tvm/relay/frontend/tensorflow.py
python/tvm/relay/op/_reduce.py
python/tvm/relay/op/_tensor.py
python/tvm/relay/op/_tensor_grad.py
python/tvm/relay/op/_transform.py
python/tvm/relay/op/dyn/_tensor.py
python/tvm/relay/op/image/_image.py
python/tvm/relay/op/image/image.py
python/tvm/relay/op/nn/_nn.py
python/tvm/relay/op/nn/nn.py
python/tvm/relay/op/strategy/arm_cpu.py
python/tvm/relay/op/strategy/bifrost.py
python/tvm/relay/op/strategy/cuda.py
python/tvm/relay/op/strategy/generic.py
python/tvm/relay/op/strategy/hls.py
python/tvm/relay/op/strategy/intel_graphics.py
python/tvm/relay/op/strategy/mali.py
python/tvm/relay/op/strategy/rocm.py
python/tvm/relay/op/strategy/x86.py
python/tvm/relay/op/vision/_rcnn.py
python/tvm/relay/op/vision/_vision.py
python/tvm/relay/quantize/_annotate.py
python/tvm/te/hybrid/util.py
python/tvm/topi/__init__.py [moved from topi/python/topi/__init__.py with 97% similarity]
python/tvm/topi/argwhere.py [moved from topi/python/topi/argwhere.py with 100% similarity]
python/tvm/topi/arm_cpu/__init__.py [moved from topi/python/topi/arm_cpu/__init__.py with 100% similarity]
python/tvm/topi/arm_cpu/bitserial_conv2d.py [moved from topi/python/topi/arm_cpu/bitserial_conv2d.py with 100% similarity]
python/tvm/topi/arm_cpu/bitserial_dense.py [moved from topi/python/topi/arm_cpu/bitserial_dense.py with 99% similarity]
python/tvm/topi/arm_cpu/conv2d.py [moved from topi/python/topi/arm_cpu/conv2d.py with 100% similarity]
python/tvm/topi/arm_cpu/conv2d_alter_op.py [moved from topi/python/topi/arm_cpu/conv2d_alter_op.py with 100% similarity]
python/tvm/topi/arm_cpu/conv2d_gemm.py [moved from topi/python/topi/arm_cpu/conv2d_gemm.py with 99% similarity]
python/tvm/topi/arm_cpu/conv2d_int8.py [moved from topi/python/topi/arm_cpu/conv2d_int8.py with 100% similarity]
python/tvm/topi/arm_cpu/conv2d_spatial_pack.py [moved from topi/python/topi/arm_cpu/conv2d_spatial_pack.py with 100% similarity]
python/tvm/topi/arm_cpu/conv2d_transpose.py [moved from topi/python/topi/arm_cpu/conv2d_transpose.py with 100% similarity]
python/tvm/topi/arm_cpu/cortex_m7/__init__.py [moved from topi/python/topi/arm_cpu/cortex_m7/__init__.py with 100% similarity]
python/tvm/topi/arm_cpu/cortex_m7/conv2d/__init__.py [moved from topi/python/topi/arm_cpu/cortex_m7/conv2d/__init__.py with 100% similarity]
python/tvm/topi/arm_cpu/cortex_m7/conv2d/direct.py [moved from topi/python/topi/arm_cpu/cortex_m7/conv2d/direct.py with 98% similarity]
python/tvm/topi/arm_cpu/cortex_m7/conv2d/direct_simd.py [moved from topi/python/topi/arm_cpu/cortex_m7/conv2d/direct_simd.py with 98% similarity]
python/tvm/topi/arm_cpu/cortex_m7/micro_kernel/__init__.py [moved from topi/python/topi/arm_cpu/cortex_m7/micro_kernel/__init__.py with 100% similarity]
python/tvm/topi/arm_cpu/cortex_m7/micro_kernel/gemm.py [moved from topi/python/topi/arm_cpu/cortex_m7/micro_kernel/gemm.py with 100% similarity]
python/tvm/topi/arm_cpu/depthwise_conv2d.py [moved from topi/python/topi/arm_cpu/depthwise_conv2d.py with 100% similarity]
python/tvm/topi/arm_cpu/injective.py [moved from topi/python/topi/arm_cpu/injective.py with 100% similarity]
python/tvm/topi/arm_cpu/tensor_intrin.py [moved from topi/python/topi/arm_cpu/tensor_intrin.py with 100% similarity]
python/tvm/topi/bifrost/__init__.py [moved from topi/python/topi/bifrost/__init__.py with 100% similarity]
python/tvm/topi/bifrost/conv2d.py [moved from topi/python/topi/bifrost/conv2d.py with 100% similarity]
python/tvm/topi/bifrost/dense.py [moved from topi/python/topi/bifrost/dense.py with 100% similarity]
python/tvm/topi/bifrost/depthwise_conv2d.py [moved from topi/python/topi/bifrost/depthwise_conv2d.py with 100% similarity]
python/tvm/topi/bifrost/gemm.py [moved from topi/python/topi/bifrost/gemm.py with 100% similarity]
python/tvm/topi/bifrost/transforms.py [moved from topi/python/topi/bifrost/transforms.py with 100% similarity]
python/tvm/topi/broadcast.py [moved from topi/python/topi/broadcast.py with 100% similarity]
python/tvm/topi/cpp/__init__.py [moved from topi/python/topi/cpp/__init__.py with 100% similarity]
python/tvm/topi/cpp/cuda.py [moved from topi/python/topi/cpp/cuda.py with 94% similarity]
python/tvm/topi/cpp/generic.py [moved from topi/python/topi/cpp/generic.py with 93% similarity]
python/tvm/topi/cpp/impl.py [new file with mode: 0644]
python/tvm/topi/cpp/nn.py [moved from topi/python/topi/cpp/nn.py with 94% similarity]
python/tvm/topi/cpp/rocm.py [moved from topi/python/topi/cpp/rocm.py with 94% similarity]
python/tvm/topi/cpp/util.py [moved from topi/python/topi/cpp/util.py with 94% similarity]
python/tvm/topi/cpp/vision/__init__.py [moved from topi/python/topi/cpp/vision/__init__.py with 93% similarity]
python/tvm/topi/cpp/vision/yolo.py [moved from topi/python/topi/cpp/vision/yolo.py with 92% similarity]
python/tvm/topi/cpp/x86.py [moved from topi/python/topi/cpp/x86.py with 94% similarity]
python/tvm/topi/cuda/__init__.py [moved from topi/python/topi/cuda/__init__.py with 100% similarity]
python/tvm/topi/cuda/batch_matmul.py [moved from topi/python/topi/cuda/batch_matmul.py with 100% similarity]
python/tvm/topi/cuda/conv1d.py [moved from topi/python/topi/cuda/conv1d.py with 100% similarity]
python/tvm/topi/cuda/conv1d_transpose_ncw.py [moved from topi/python/topi/cuda/conv1d_transpose_ncw.py with 100% similarity]
python/tvm/topi/cuda/conv2d.py [moved from topi/python/topi/cuda/conv2d.py with 100% similarity]
python/tvm/topi/cuda/conv2d_alter_op.py [moved from topi/python/topi/cuda/conv2d_alter_op.py with 100% similarity]
python/tvm/topi/cuda/conv2d_direct.py [moved from topi/python/topi/cuda/conv2d_direct.py with 100% similarity]
python/tvm/topi/cuda/conv2d_hwcn.py [moved from topi/python/topi/cuda/conv2d_hwcn.py with 100% similarity]
python/tvm/topi/cuda/conv2d_int8.py [moved from topi/python/topi/cuda/conv2d_int8.py with 100% similarity]
python/tvm/topi/cuda/conv2d_nhwc.py [moved from topi/python/topi/cuda/conv2d_nhwc.py with 100% similarity]
python/tvm/topi/cuda/conv2d_nhwc_tensorcore.py [moved from topi/python/topi/cuda/conv2d_nhwc_tensorcore.py with 100% similarity]
python/tvm/topi/cuda/conv2d_nhwc_winograd.py [moved from topi/python/topi/cuda/conv2d_nhwc_winograd.py with 100% similarity]
python/tvm/topi/cuda/conv2d_transpose_nchw.py [moved from topi/python/topi/cuda/conv2d_transpose_nchw.py with 100% similarity]
python/tvm/topi/cuda/conv2d_winograd.py [moved from topi/python/topi/cuda/conv2d_winograd.py with 100% similarity]
python/tvm/topi/cuda/conv3d.py [moved from topi/python/topi/cuda/conv3d.py with 100% similarity]
python/tvm/topi/cuda/conv3d_alter_op.py [moved from topi/python/topi/cuda/conv3d_alter_op.py with 100% similarity]
python/tvm/topi/cuda/conv3d_direct.py [moved from topi/python/topi/cuda/conv3d_direct.py with 100% similarity]
python/tvm/topi/cuda/conv3d_ndhwc_tensorcore.py [moved from topi/python/topi/cuda/conv3d_ndhwc_tensorcore.py with 100% similarity]
python/tvm/topi/cuda/conv3d_transpose_ncdhw.py [moved from topi/python/topi/cuda/conv3d_transpose_ncdhw.py with 100% similarity]
python/tvm/topi/cuda/conv3d_winograd.py [moved from topi/python/topi/cuda/conv3d_winograd.py with 100% similarity]
python/tvm/topi/cuda/correlation.py [moved from topi/python/topi/cuda/correlation.py with 100% similarity]
python/tvm/topi/cuda/deformable_conv2d.py [moved from topi/python/topi/cuda/deformable_conv2d.py with 100% similarity]
python/tvm/topi/cuda/dense.py [moved from topi/python/topi/cuda/dense.py with 100% similarity]
python/tvm/topi/cuda/dense_tensorcore.py [moved from topi/python/topi/cuda/dense_tensorcore.py with 100% similarity]
python/tvm/topi/cuda/depthwise_conv2d.py [moved from topi/python/topi/cuda/depthwise_conv2d.py with 100% similarity]
python/tvm/topi/cuda/group_conv2d_nchw.py [moved from topi/python/topi/cuda/group_conv2d_nchw.py with 100% similarity]
python/tvm/topi/cuda/injective.py [moved from topi/python/topi/cuda/injective.py with 100% similarity]
python/tvm/topi/cuda/nms.py [moved from topi/python/topi/cuda/nms.py with 100% similarity]
python/tvm/topi/cuda/nn.py [moved from topi/python/topi/cuda/nn.py with 100% similarity]
python/tvm/topi/cuda/pooling.py [moved from topi/python/topi/cuda/pooling.py with 100% similarity]
python/tvm/topi/cuda/rcnn/__init__.py [moved from topi/python/topi/cuda/rcnn/__init__.py with 100% similarity]
python/tvm/topi/cuda/rcnn/proposal.py [moved from topi/python/topi/cuda/rcnn/proposal.py with 100% similarity]
python/tvm/topi/cuda/reduction.py [moved from topi/python/topi/cuda/reduction.py with 100% similarity]
python/tvm/topi/cuda/softmax.py [moved from topi/python/topi/cuda/softmax.py with 100% similarity]
python/tvm/topi/cuda/sort.py [moved from topi/python/topi/cuda/sort.py with 100% similarity]
python/tvm/topi/cuda/sparse.py [moved from topi/python/topi/cuda/sparse.py with 100% similarity]
python/tvm/topi/cuda/ssd/__init__.py [moved from topi/python/topi/vision/ssd/__init__.py with 100% similarity]
python/tvm/topi/cuda/ssd/multibox.py [moved from topi/python/topi/cuda/ssd/multibox.py with 99% similarity]
python/tvm/topi/cuda/tensor_intrin.py [moved from topi/python/topi/cuda/tensor_intrin.py with 100% similarity]
python/tvm/topi/cuda/vision.py [moved from topi/python/topi/cuda/vision.py with 100% similarity]
python/tvm/topi/generic/__init__.py [moved from topi/python/topi/generic/__init__.py with 100% similarity]
python/tvm/topi/generic/conv2d.py [moved from topi/python/topi/generic/conv2d.py with 100% similarity]
python/tvm/topi/generic/default.py [moved from topi/python/topi/generic/default.py with 100% similarity]
python/tvm/topi/generic/extern.py [moved from topi/python/topi/generic/extern.py with 100% similarity]
python/tvm/topi/generic/image.py [moved from topi/python/topi/generic/image.py with 100% similarity]
python/tvm/topi/generic/injective.py [moved from topi/python/topi/generic/injective.py with 100% similarity]
python/tvm/topi/generic/nn.py [moved from topi/python/topi/generic/nn.py with 100% similarity]
python/tvm/topi/generic/search.py [moved from topi/python/topi/generic/search.py with 100% similarity]
python/tvm/topi/generic/sort.py [moved from topi/python/topi/generic/sort.py with 100% similarity]
python/tvm/topi/generic/vision.py [moved from topi/python/topi/generic/vision.py with 100% similarity]
python/tvm/topi/generic_op_impl.py [moved from topi/python/topi/generic_op_impl.py with 100% similarity]
python/tvm/topi/hls/__init__.py [moved from topi/python/topi/hls/__init__.py with 100% similarity]
python/tvm/topi/hls/injective.py [moved from topi/python/topi/hls/injective.py with 100% similarity]
python/tvm/topi/hls/nn.py [moved from topi/python/topi/hls/nn.py with 100% similarity]
python/tvm/topi/image/__init__.py [moved from topi/python/topi/image/__init__.py with 100% similarity]
python/tvm/topi/image/dilation2d.py [moved from topi/python/topi/image/dilation2d.py with 99% similarity]
python/tvm/topi/image/grid_sample.py [moved from topi/python/topi/image/grid_sample.py with 100% similarity]
python/tvm/topi/image/resize.py [moved from topi/python/topi/image/resize.py with 99% similarity]
python/tvm/topi/intel_graphics/__init__.py [moved from topi/python/topi/intel_graphics/__init__.py with 100% similarity]
python/tvm/topi/intel_graphics/conv2d.py [moved from topi/python/topi/intel_graphics/conv2d.py with 100% similarity]
python/tvm/topi/intel_graphics/conv2d_alter_op.py [moved from topi/python/topi/intel_graphics/conv2d_alter_op.py with 100% similarity]
python/tvm/topi/intel_graphics/depthwise_conv2d.py [moved from topi/python/topi/intel_graphics/depthwise_conv2d.py with 100% similarity]
python/tvm/topi/mali/__init__.py [moved from topi/python/topi/mali/__init__.py with 100% similarity]
python/tvm/topi/mali/conv2d.py [moved from topi/python/topi/mali/conv2d.py with 100% similarity]
python/tvm/topi/mali/dense.py [moved from topi/python/topi/mali/dense.py with 100% similarity]
python/tvm/topi/mali/depthwise_conv2d.py [moved from topi/python/topi/mali/depthwise_conv2d.py with 100% similarity]
python/tvm/topi/math.py [moved from topi/python/topi/math.py with 100% similarity]
python/tvm/topi/nn/__init__.py [moved from topi/python/topi/nn/__init__.py with 100% similarity]
python/tvm/topi/nn/batch_matmul.py [moved from topi/python/topi/nn/batch_matmul.py with 100% similarity]
python/tvm/topi/nn/bitserial_conv2d.py [moved from topi/python/topi/nn/bitserial_conv2d.py with 100% similarity]
python/tvm/topi/nn/bitserial_dense.py [moved from topi/python/topi/nn/bitserial_dense.py with 98% similarity]
python/tvm/topi/nn/bitserial_util.py [moved from topi/python/topi/nn/bitserial_util.py with 95% similarity]
python/tvm/topi/nn/bnn.py [moved from topi/python/topi/nn/bnn.py with 100% similarity]
python/tvm/topi/nn/conv1d.py [moved from topi/python/topi/nn/conv1d.py with 100% similarity]
python/tvm/topi/nn/conv1d_transpose.py [moved from topi/python/topi/nn/conv1d_transpose.py with 100% similarity]
python/tvm/topi/nn/conv2d.py [moved from topi/python/topi/nn/conv2d.py with 99% similarity]
python/tvm/topi/nn/conv2d_transpose.py [moved from topi/python/topi/nn/conv2d_transpose.py with 100% similarity]
python/tvm/topi/nn/conv3d.py [moved from topi/python/topi/nn/conv3d.py with 100% similarity]
python/tvm/topi/nn/conv3d_transpose.py [moved from topi/python/topi/nn/conv3d_transpose.py with 100% similarity]
python/tvm/topi/nn/correlation.py [moved from topi/python/topi/nn/correlation.py with 100% similarity]
python/tvm/topi/nn/deformable_conv2d.py [moved from topi/python/topi/nn/deformable_conv2d.py with 100% similarity]
python/tvm/topi/nn/dense.py [moved from topi/python/topi/nn/dense.py with 100% similarity]
python/tvm/topi/nn/depth_to_space.py [moved from topi/python/topi/nn/depth_to_space.py with 100% similarity]
python/tvm/topi/nn/depthwise_conv2d.py [moved from topi/python/topi/nn/depthwise_conv2d.py with 100% similarity]
python/tvm/topi/nn/dilate.py [moved from topi/python/topi/nn/dilate.py with 100% similarity]
python/tvm/topi/nn/elemwise.py [moved from topi/python/topi/nn/elemwise.py with 95% similarity]
python/tvm/topi/nn/fifo_buffer.py [moved from topi/python/topi/nn/fifo_buffer.py with 100% similarity]
python/tvm/topi/nn/flatten.py [moved from topi/python/topi/nn/flatten.py with 100% similarity]
python/tvm/topi/nn/local_response_norm.py [moved from topi/python/topi/nn/local_response_norm.py with 100% similarity]
python/tvm/topi/nn/mapping.py [moved from topi/python/topi/nn/mapping.py with 100% similarity]
python/tvm/topi/nn/pad.py [moved from topi/python/topi/nn/pad.py with 100% similarity]
python/tvm/topi/nn/pooling.py [moved from topi/python/topi/nn/pooling.py with 100% similarity]
python/tvm/topi/nn/softmax.py [moved from topi/python/topi/nn/softmax.py with 100% similarity]
python/tvm/topi/nn/space_to_depth.py [moved from topi/python/topi/nn/space_to_depth.py with 100% similarity]
python/tvm/topi/nn/sparse.py [moved from topi/python/topi/nn/sparse.py with 100% similarity]
python/tvm/topi/nn/upsampling.py [moved from topi/python/topi/nn/upsampling.py with 99% similarity]
python/tvm/topi/nn/util.py [moved from topi/python/topi/nn/util.py with 100% similarity]
python/tvm/topi/nn/winograd_util.py [moved from topi/python/topi/nn/winograd_util.py with 100% similarity]
python/tvm/topi/reduction.py [moved from topi/python/topi/reduction.py with 100% similarity]
python/tvm/topi/rocm/__init__.py [moved from topi/python/topi/rocm/__init__.py with 100% similarity]
python/tvm/topi/rocm/conv2d.py [moved from topi/python/topi/rocm/conv2d.py with 100% similarity]
python/tvm/topi/rocm/dense.py [moved from topi/python/topi/rocm/dense.py with 100% similarity]
python/tvm/topi/rocm/nn.py [moved from topi/python/topi/rocm/nn.py with 100% similarity]
python/tvm/topi/scatter.py [moved from topi/python/topi/scatter.py with 100% similarity]
python/tvm/topi/scatter_add.py [moved from topi/python/topi/scatter_add.py with 100% similarity]
python/tvm/topi/sort.py [moved from topi/python/topi/sort.py with 100% similarity]
python/tvm/topi/sparse/__init__.py [moved from topi/python/topi/sparse/__init__.py with 100% similarity]
python/tvm/topi/sparse/csrmm.py [moved from topi/python/topi/sparse/csrmm.py with 100% similarity]
python/tvm/topi/sparse/csrmv.py [moved from topi/python/topi/sparse/csrmv.py with 100% similarity]
python/tvm/topi/sparse/dense.py [moved from topi/python/topi/sparse/dense.py with 100% similarity]
python/tvm/topi/tag.py [moved from topi/python/topi/tag.py with 100% similarity]
python/tvm/topi/tensor.py [moved from topi/python/topi/tensor.py with 100% similarity]
python/tvm/topi/testing/__init__.py [moved from topi/python/topi/testing/__init__.py with 100% similarity]
python/tvm/topi/testing/adaptive_pool_python.py [moved from topi/python/topi/testing/adaptive_pool_python.py with 100% similarity]
python/tvm/topi/testing/batch_matmul.py [moved from topi/python/topi/testing/batch_matmul.py with 100% similarity]
python/tvm/topi/testing/bilinear_resize_python.py [moved from topi/python/topi/testing/bilinear_resize_python.py with 98% similarity]
python/tvm/topi/testing/common.py [moved from topi/python/topi/testing/common.py with 99% similarity]
python/tvm/topi/testing/conv1d_ncw_python.py [moved from topi/python/topi/testing/conv1d_ncw_python.py with 98% similarity]
python/tvm/topi/testing/conv1d_transpose_ncw_python.py [moved from topi/python/topi/testing/conv1d_transpose_ncw_python.py with 94% similarity]
python/tvm/topi/testing/conv2d_hwcn_python.py [moved from topi/python/topi/testing/conv2d_hwcn_python.py with 98% similarity]
python/tvm/topi/testing/conv2d_nchw_python.py [moved from topi/python/topi/testing/conv2d_nchw_python.py with 98% similarity]
python/tvm/topi/testing/conv2d_nhwc_python.py [moved from topi/python/topi/testing/conv2d_nhwc_python.py with 98% similarity]
python/tvm/topi/testing/conv2d_transpose_python.py [moved from topi/python/topi/testing/conv2d_transpose_python.py with 96% similarity]
python/tvm/topi/testing/conv3d_ncdhw_python.py [moved from topi/python/topi/testing/conv3d_ncdhw_python.py with 98% similarity]
python/tvm/topi/testing/conv3d_ndhwc_python.py [moved from topi/python/topi/testing/conv3d_ndhwc_python.py with 98% similarity]
python/tvm/topi/testing/conv3d_transpose_ncdhw_python.py [moved from topi/python/topi/testing/conv3d_transpose_ncdhw_python.py with 92% similarity]
python/tvm/topi/testing/correlation_nchw_python.py [moved from topi/python/topi/testing/correlation_nchw_python.py with 100% similarity]
python/tvm/topi/testing/crop_and_resize_python.py [moved from topi/python/topi/testing/crop_and_resize_python.py with 100% similarity]
python/tvm/topi/testing/deformable_conv2d_nchw_python.py [moved from topi/python/topi/testing/deformable_conv2d_nchw_python.py with 99% similarity]
python/tvm/topi/testing/depth_to_space.py [moved from topi/python/topi/testing/depth_to_space.py with 100% similarity]
python/tvm/topi/testing/depthwise_conv2d_python.py [moved from topi/python/topi/testing/depthwise_conv2d_python.py with 100% similarity]
python/tvm/topi/testing/dilate_python.py [moved from topi/python/topi/testing/dilate_python.py with 100% similarity]
python/tvm/topi/testing/gather_nd_python.py [moved from topi/python/topi/testing/gather_nd_python.py with 100% similarity]
python/tvm/topi/testing/gather_python.py [moved from topi/python/topi/testing/gather_python.py with 100% similarity]
python/tvm/topi/testing/grid_sample_python.py [moved from topi/python/topi/testing/grid_sample_python.py with 100% similarity]
python/tvm/topi/testing/l2_normalize_python.py [moved from topi/python/topi/testing/l2_normalize_python.py with 100% similarity]
python/tvm/topi/testing/lrn_python.py [moved from topi/python/topi/testing/lrn_python.py with 100% similarity]
python/tvm/topi/testing/one_hot.py [moved from topi/python/topi/testing/one_hot.py with 100% similarity]
python/tvm/topi/testing/pool1d_python.py [moved from topi/python/topi/testing/pool1d_python.py with 100% similarity]
python/tvm/topi/testing/pool3d_python.py [moved from topi/python/topi/testing/pool3d_python.py with 100% similarity]
python/tvm/topi/testing/pool_grad_python.py [moved from topi/python/topi/testing/pool_grad_python.py with 100% similarity]
python/tvm/topi/testing/reorg_python.py [moved from topi/python/topi/testing/reorg_python.py with 100% similarity]
python/tvm/topi/testing/roi_align_python.py [moved from topi/python/topi/testing/roi_align_python.py with 100% similarity]
python/tvm/topi/testing/roi_pool_python.py [moved from topi/python/topi/testing/roi_pool_python.py with 100% similarity]
python/tvm/topi/testing/sequence_mask_python.py [moved from topi/python/topi/testing/sequence_mask_python.py with 100% similarity]
python/tvm/topi/testing/slice_axis_python.py [moved from topi/python/topi/testing/slice_axis_python.py with 100% similarity]
python/tvm/topi/testing/softmax_python.py [moved from topi/python/topi/testing/softmax_python.py with 100% similarity]
python/tvm/topi/testing/space_to_depth.py [moved from topi/python/topi/testing/space_to_depth.py with 100% similarity]
python/tvm/topi/testing/strided_slice_python.py [moved from topi/python/topi/testing/strided_slice_python.py with 100% similarity]
python/tvm/topi/testing/trilinear_resize3d_python.py [moved from topi/python/topi/testing/trilinear_resize3d_python.py with 100% similarity]
python/tvm/topi/testing/upsampling_python.py [moved from topi/python/topi/testing/upsampling_python.py with 99% similarity]
python/tvm/topi/transform.py [moved from topi/python/topi/transform.py with 97% similarity]
python/tvm/topi/util.py [moved from topi/python/topi/util.py with 99% similarity]
python/tvm/topi/vision/__init__.py [moved from topi/python/topi/vision/__init__.py with 100% similarity]
python/tvm/topi/vision/nms.py [moved from topi/python/topi/vision/nms.py with 100% similarity]
python/tvm/topi/vision/rcnn/__init__.py [moved from topi/python/topi/vision/rcnn/__init__.py with 100% similarity]
python/tvm/topi/vision/rcnn/proposal.py [moved from topi/python/topi/vision/rcnn/proposal.py with 100% similarity]
python/tvm/topi/vision/rcnn/roi_align.py [moved from topi/python/topi/vision/rcnn/roi_align.py with 100% similarity]
python/tvm/topi/vision/rcnn/roi_pool.py [moved from topi/python/topi/vision/rcnn/roi_pool.py with 100% similarity]
python/tvm/topi/vision/reorg.py [moved from topi/python/topi/vision/reorg.py with 100% similarity]
python/tvm/topi/vision/ssd/__init__.py [moved from topi/python/topi/cuda/ssd/__init__.py with 100% similarity]
python/tvm/topi/vision/ssd/multibox.py [moved from topi/python/topi/vision/ssd/multibox.py with 99% similarity]
python/tvm/topi/x86/__init__.py [moved from topi/python/topi/x86/__init__.py with 100% similarity]
python/tvm/topi/x86/batch_matmul.py [moved from topi/python/topi/x86/batch_matmul.py with 100% similarity]
python/tvm/topi/x86/binarize_pack.py [moved from topi/python/topi/x86/binarize_pack.py with 100% similarity]
python/tvm/topi/x86/binary_dense.py [moved from topi/python/topi/x86/binary_dense.py with 100% similarity]
python/tvm/topi/x86/bitserial_conv2d.py [moved from topi/python/topi/x86/bitserial_conv2d.py with 100% similarity]
python/tvm/topi/x86/bitserial_dense.py [moved from topi/python/topi/x86/bitserial_dense.py with 99% similarity]
python/tvm/topi/x86/conv1d.py [moved from topi/python/topi/x86/conv1d.py with 100% similarity]
python/tvm/topi/x86/conv2d.py [moved from topi/python/topi/x86/conv2d.py with 100% similarity]
python/tvm/topi/x86/conv2d_alter_op.py [moved from topi/python/topi/x86/conv2d_alter_op.py with 100% similarity]
python/tvm/topi/x86/conv2d_avx_1x1.py [moved from topi/python/topi/x86/conv2d_avx_1x1.py with 100% similarity]
python/tvm/topi/x86/conv2d_avx_common.py [moved from topi/python/topi/x86/conv2d_avx_common.py with 100% similarity]
python/tvm/topi/x86/conv2d_int8.py [moved from topi/python/topi/x86/conv2d_int8.py with 100% similarity]
python/tvm/topi/x86/conv2d_transpose.py [moved from topi/python/topi/x86/conv2d_transpose.py with 100% similarity]
python/tvm/topi/x86/conv3d.py [moved from topi/python/topi/x86/conv3d.py with 100% similarity]
python/tvm/topi/x86/conv3d_transpose.py [moved from topi/python/topi/x86/conv3d_transpose.py with 100% similarity]
python/tvm/topi/x86/dense.py [moved from topi/python/topi/x86/dense.py with 100% similarity]
python/tvm/topi/x86/depthwise_conv2d.py [moved from topi/python/topi/x86/depthwise_conv2d.py with 100% similarity]
python/tvm/topi/x86/injective.py [moved from topi/python/topi/x86/injective.py with 100% similarity]
python/tvm/topi/x86/nn.py [moved from topi/python/topi/x86/nn.py with 100% similarity]
python/tvm/topi/x86/pooling.py [moved from topi/python/topi/x86/pooling.py with 100% similarity]
python/tvm/topi/x86/reduction.py [moved from topi/python/topi/x86/reduction.py with 100% similarity]
python/tvm/topi/x86/roi_align.py [moved from topi/python/topi/x86/roi_align.py with 100% similarity]
python/tvm/topi/x86/sparse.py [moved from topi/python/topi/x86/sparse.py with 100% similarity]
python/tvm/topi/x86/tensor_intrin.py [moved from topi/python/topi/x86/tensor_intrin.py with 100% similarity]
python/tvm/topi/x86/util.py [moved from topi/python/topi/x86/util.py with 100% similarity]
rust/tvm-graph-rt/tests/test_graph_serde.rs
src/relay/backend/compile_engine.cc
src/relay/op/annotation/annotation.cc
src/relay/op/debug.cc
src/relay/op/dyn/tensor/transform.cc
src/relay/op/memory/memory.cc
src/relay/op/nn/correlation.cc
src/relay/op/nn/nn.cc
src/relay/op/nn/pad.cc
src/relay/op/nn/pooling.cc
src/relay/op/tensor/binary.cc
src/relay/op/tensor/reduce.cc
src/relay/op/tensor/transform.cc
src/relay/op/tensor/unary.cc
src/relay/op/vision/yolo.cc
src/relay/op/vm/vm.cc
src/te/autodiff/adjoint.cc
src/topi/broadcast.cc [moved from topi/src/broadcast.cc with 97% similarity]
src/topi/elemwise.cc [moved from topi/src/elemwise.cc with 98% similarity]
src/topi/nn.cc [moved from topi/src/nn.cc with 92% similarity]
src/topi/reduction.cc [moved from topi/src/reduction.cc with 95% similarity]
src/topi/schedule.cc [moved from topi/src/schedule.cc with 94% similarity]
src/topi/transform.cc [moved from topi/src/transform.cc with 98% similarity]
src/topi/vision.cc [moved from topi/src/vision.cc with 94% similarity]
tests/cpp/auto_scheduler_test.cc
tests/cpp/build_module_test.cc
tests/cpp/relay_build_module_test.cc
tests/cpp/relay_transform_sequential_test.cc
tests/cpp/topi_ewise_test.cc
tests/cpp/utvm_runtime_standalone_test.cc
tests/python/contrib/test_cblas.py
tests/python/contrib/test_cudnn.py
tests/python/contrib/test_gemm_acc16.py
tests/python/contrib/test_gemm_acc32_vnni.py
tests/python/contrib/test_miopen.py
tests/python/contrib/test_mxnet_bridge.py
tests/python/contrib/test_nnpack.py
tests/python/contrib/test_tedd.py
tests/python/frontend/coreml/test_forward.py
tests/python/frontend/onnx/test_forward.py
tests/python/integration/test_winograd_nnpack.py
tests/python/relay/test_any.py
tests/python/relay/test_backend_compile_engine.py
tests/python/relay/test_op_fast_math.py
tests/python/relay/test_op_grad_level2.py
tests/python/relay/test_op_level1.py
tests/python/relay/test_op_level10.py
tests/python/relay/test_op_level2.py
tests/python/relay/test_op_level4.py
tests/python/relay/test_op_level5.py
tests/python/relay/test_op_qnn_concatenate.py
tests/python/relay/test_op_qnn_mul.py
tests/python/relay/test_pass_alter_op_layout.py
tests/python/topi/python/common.py [moved from topi/tests/python/common.py with 98% similarity]
tests/python/topi/python/test_fifo_buffer.py [moved from topi/tests/python/test_fifo_buffer.py with 95% similarity]
tests/python/topi/python/test_topi_basic.py [moved from topi/tests/python/test_topi_basic.py with 97% similarity]
tests/python/topi/python/test_topi_batch_matmul.py [moved from topi/tests/python/test_topi_batch_matmul.py with 91% similarity]
tests/python/topi/python/test_topi_bitserial_conv2d.py [moved from topi/tests/python/test_topi_bitserial_conv2d.py with 91% similarity]
tests/python/topi/python/test_topi_bitserial_conv2d_rasp.py [moved from topi/tests/python/test_topi_bitserial_conv2d_rasp.py with 92% similarity]
tests/python/topi/python/test_topi_bitserial_dense.py [moved from topi/tests/python/test_topi_bitserial_dense.py with 94% similarity]
tests/python/topi/python/test_topi_bnn.py [moved from topi/tests/python/test_topi_bnn.py with 97% similarity]
tests/python/topi/python/test_topi_broadcast.py [moved from topi/tests/python/test_topi_broadcast.py with 97% similarity]
tests/python/topi/python/test_topi_clip.py [moved from topi/tests/python/test_topi_clip.py with 93% similarity]
tests/python/topi/python/test_topi_conv1d.py [moved from topi/tests/python/test_topi_conv1d.py with 92% similarity]
tests/python/topi/python/test_topi_conv1d_transpose_ncw.py [moved from topi/tests/python/test_topi_conv1d_transpose_ncw.py with 92% similarity]
tests/python/topi/python/test_topi_conv2d_NCHWc.py [moved from topi/tests/python/test_topi_conv2d_NCHWc.py with 97% similarity]
tests/python/topi/python/test_topi_conv2d_hwcn.py [moved from topi/tests/python/test_topi_conv2d_hwcn.py with 92% similarity]
tests/python/topi/python/test_topi_conv2d_int8.py [moved from topi/tests/python/test_topi_conv2d_int8.py with 97% similarity]
tests/python/topi/python/test_topi_conv2d_nchw.py [moved from topi/tests/python/test_topi_conv2d_nchw.py with 96% similarity]
tests/python/topi/python/test_topi_conv2d_nhwc.py [moved from topi/tests/python/test_topi_conv2d_nhwc.py with 91% similarity]
tests/python/topi/python/test_topi_conv2d_nhwc_pack_int8.py [moved from topi/tests/python/test_topi_conv2d_nhwc_pack_int8.py with 91% similarity]
tests/python/topi/python/test_topi_conv2d_nhwc_tensorcore.py [moved from topi/tests/python/test_topi_conv2d_nhwc_tensorcore.py with 92% similarity]
tests/python/topi/python/test_topi_conv2d_nhwc_winograd.py [moved from topi/tests/python/test_topi_conv2d_nhwc_winograd.py with 93% similarity]
tests/python/topi/python/test_topi_conv2d_transpose_nchw.py [moved from topi/tests/python/test_topi_conv2d_transpose_nchw.py with 94% similarity]
tests/python/topi/python/test_topi_conv2d_winograd.py [moved from topi/tests/python/test_topi_conv2d_winograd.py with 93% similarity]
tests/python/topi/python/test_topi_conv3d_ncdhw.py [moved from topi/tests/python/test_topi_conv3d_ncdhw.py with 92% similarity]
tests/python/topi/python/test_topi_conv3d_ndhwc.py [moved from topi/tests/python/test_topi_conv3d_ndhwc.py with 91% similarity]
tests/python/topi/python/test_topi_conv3d_ndhwc_tensorcore.py [moved from topi/tests/python/test_topi_conv3d_ndhwc_tensorcore.py with 92% similarity]
tests/python/topi/python/test_topi_conv3d_transpose_ncdhw.py [moved from topi/tests/python/test_topi_conv3d_transpose_ncdhw.py with 94% similarity]
tests/python/topi/python/test_topi_conv3d_winograd.py [moved from topi/tests/python/test_topi_conv3d_winograd.py with 93% similarity]
tests/python/topi/python/test_topi_correlation.py [moved from topi/tests/python/test_topi_correlation.py with 92% similarity]
tests/python/topi/python/test_topi_deformable_conv2d.py [moved from topi/tests/python/test_topi_deformable_conv2d.py with 93% similarity]
tests/python/topi/python/test_topi_dense.py [moved from topi/tests/python/test_topi_dense.py with 97% similarity]
tests/python/topi/python/test_topi_dense_tensorcore.py [moved from topi/tests/python/test_topi_dense_tensorcore.py with 95% similarity]
tests/python/topi/python/test_topi_depth_to_space.py [moved from topi/tests/python/test_topi_depth_to_space.py with 94% similarity]
tests/python/topi/python/test_topi_depthwise_conv2d.py [moved from topi/tests/python/test_topi_depthwise_conv2d.py with 96% similarity]
tests/python/topi/python/test_topi_depthwise_conv2d_back_input.py [moved from topi/tests/python/test_topi_depthwise_conv2d_back_input.py with 94% similarity]
tests/python/topi/python/test_topi_depthwise_conv2d_back_weight.py [moved from topi/tests/python/test_topi_depthwise_conv2d_back_weight.py with 94% similarity]
tests/python/topi/python/test_topi_dilate.py [moved from topi/tests/python/test_topi_dilate.py with 94% similarity]
tests/python/topi/python/test_topi_group_conv2d.py [moved from topi/tests/python/test_topi_group_conv2d.py with 94% similarity]
tests/python/topi/python/test_topi_group_conv2d_NCHWc_int8.py [moved from topi/tests/python/test_topi_group_conv2d_NCHWc_int8.py with 96% similarity]
tests/python/topi/python/test_topi_image.py [moved from topi/tests/python/test_topi_image.py with 91% similarity]
tests/python/topi/python/test_topi_lrn.py [moved from topi/tests/python/test_topi_lrn.py with 90% similarity]
tests/python/topi/python/test_topi_math.py [moved from topi/tests/python/test_topi_math.py with 96% similarity]
tests/python/topi/python/test_topi_matmul.py [moved from topi/tests/python/test_topi_matmul.py with 97% similarity]
tests/python/topi/python/test_topi_pooling.py [moved from topi/tests/python/test_topi_pooling.py with 95% similarity]
tests/python/topi/python/test_topi_reduce.py [moved from topi/tests/python/test_topi_reduce.py with 98% similarity]
tests/python/topi/python/test_topi_relu.py [moved from topi/tests/python/test_topi_relu.py with 96% similarity]
tests/python/topi/python/test_topi_reorg.py [moved from topi/tests/python/test_topi_reorg.py with 91% similarity]
tests/python/topi/python/test_topi_softmax.py [moved from topi/tests/python/test_topi_softmax.py with 90% similarity]
tests/python/topi/python/test_topi_sort.py [moved from topi/tests/python/test_topi_sort.py with 95% similarity]
tests/python/topi/python/test_topi_space_to_depth.py [moved from topi/tests/python/test_topi_space_to_depth.py with 94% similarity]
tests/python/topi/python/test_topi_sparse.py [moved from topi/tests/python/test_topi_sparse.py with 98% similarity]
tests/python/topi/python/test_topi_tensor.py [moved from topi/tests/python/test_topi_tensor.py with 97% similarity]
tests/python/topi/python/test_topi_transform.py [moved from topi/tests/python/test_topi_transform.py with 94% similarity]
tests/python/topi/python/test_topi_upsampling.py [moved from topi/tests/python/test_topi_upsampling.py with 93% similarity]
tests/python/topi/python/test_topi_util.py [moved from topi/tests/python/test_topi_util.py with 97% similarity]
tests/python/topi/python/test_topi_vision.py [moved from topi/tests/python/test_topi_vision.py with 95% similarity]
tests/python/unittest/test_auto_scheduler_common.py
tests/python/unittest/test_auto_scheduler_compute_dag.py
tests/python/unittest/test_auto_scheduler_loop_state.py
tests/python/unittest/test_auto_scheduler_measure.py
tests/python/unittest/test_runtime_heterogeneous.py
tests/python/unittest/test_target_codegen_cuda.py
tests/python/unittest/test_target_codegen_llvm.py
tests/python/unittest/test_target_custom_datatypes.py
tests/python/unittest/test_te_autodiff.py
tests/python/unittest/test_te_schedule_postproc_rewrite_for_tensor_core.py
tests/python/unittest/test_te_schedule_tensor_core.py
tests/python/unittest/test_te_tensor.py
tests/python/unittest/test_te_tensor_overload.py
tests/python/unittest/test_tir_data_layout.py
tests/python/unittest/test_tir_intrin.py
tests/python/unittest/test_tir_transform_bf16_legalize.py
tests/python/unittest/test_tir_transform_loop_partition.py
tests/scripts/setup-pytest-env.sh
tests/scripts/task_golang.sh
tests/scripts/task_python_topi.sh
tests/scripts/task_rust.sh
topi/python/setup.py [deleted file]
topi/python/topi/cpp/impl.py [deleted file]
tutorials/autotvm/tune_conv2d_cuda.py
tutorials/frontend/deploy_model_on_android.py
tutorials/language/tedd.py
tutorials/language/tensorize.py
tutorials/topi/intro_topi.py
vta/python/vta/__init__.py
vta/python/vta/top/bitpack.py
vta/python/vta/top/op.py
vta/python/vta/top/vta_conv2d.py
vta/python/vta/top/vta_conv2d_transpose.py
vta/python/vta/top/vta_dense.py
vta/python/vta/top/vta_group_conv2d.py
vta/python/vta/transform.py
vta/scripts/tune_conv2d.py
vta/scripts/tune_conv2d_transpose.py
vta/scripts/tune_dense.py
vta/scripts/tune_group_conv2d.py
vta/scripts/tune_resnet.py
vta/tests/python/integration/test_benchmark_topi_conv2d.py
vta/tests/python/integration/test_benchmark_topi_conv2d_transpose.py
vta/tests/python/integration/test_benchmark_topi_dense.py
vta/tests/python/integration/test_benchmark_topi_group_conv2d.py
vta/tests/python/unittest/test_vta_insn.py
vta/tutorials/autotvm/tune_relay_vta.py
vta/tutorials/optimize/convolution_opt.py

index 2911a61..1c66712 100644 (file)
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -245,7 +245,7 @@ list(APPEND COMPILER_SRCS ${DATATYPE_SRCS})
 
 
 file(GLOB TOPI_SRCS
-    topi/src/*.cc
+    src/topi/*.cc
 )
 
 file(GLOB RUNTIME_SRCS
@@ -341,7 +341,7 @@ else()
   set(CMAKE_CUDA_STANDARD 14)
 endif()
 
-add_library(tvm SHARED ${COMPILER_SRCS} ${RUNTIME_SRCS})
+add_library(tvm SHARED ${COMPILER_SRCS} ${RUNTIME_SRCS} ${TOPI_SRCS})
 add_library(tvm_topi SHARED ${TOPI_SRCS})
 add_library(tvm_runtime SHARED ${RUNTIME_SRCS})
 
@@ -403,15 +403,6 @@ if (HIDE_PRIVATE_SYMBOLS AND NOT ${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
   target_link_libraries(tvm_runtime ${HIDE_SYMBOLS_LINKER_FLAGS})
 endif()
 
-# Related headers
-target_include_directories(
-  tvm
-  PUBLIC "topi/include")
-target_include_directories(
-  tvm_topi
-  PUBLIC "topi/include")
-
-
 # Tests
 set(TEST_EXECS "")
 file(GLOB TEST_SRCS tests/cpp/*.cc)
@@ -457,11 +448,6 @@ if (INSTALL_DEV)
     PATTERN "*.h"
   )
   install(
-    DIRECTORY "topi/include/." DESTINATION "include"
-    FILES_MATCHING
-    PATTERN "*.h"
-  )
-  install(
     DIRECTORY "3rdparty/dlpack/include/." DESTINATION "include"
     FILES_MATCHING
     PATTERN "*.h"
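
The CMakeLists.txt hunks above fold the TOPI sources into libtvm itself (the standalone tvm_topi library is still built from the same sources) and drop the now-unneeded topi/include paths. A quick way to see the effect, assuming the PackedFunc registration names used in src/topi/*.cc (e.g. "topi.nn.relu" in src/topi/nn.cc):

    import tvm

    # the topi .cc files register their ops as global PackedFuncs; since they
    # are now compiled into libtvm, the lookup succeeds without loading a
    # separate libtvm_topi first
    relu = tvm.get_global_func("topi.nn.relu", allow_missing=True)
    assert relu is not None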
index 9063cd1..825e589 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -77,14 +77,12 @@ $(OUTPUTDIR)/libtvm_web_runtime.js: $(OUTPUTDIR)/libtvm_web_runtime.bc
 # Lint scripts
 cpplint:
        python3 3rdparty/dmlc-core/scripts/lint.py vta cpp vta/include vta/src
-       python3 3rdparty/dmlc-core/scripts/lint.py topi cpp topi/include;
        python3 3rdparty/dmlc-core/scripts/lint.py tvm cpp \
         include src \
         examples/extension/src examples/graph_executor/src
 
 pylint:
        python3 -m pylint python/tvm --rcfile=$(ROOTDIR)/tests/lint/pylintrc
-       python3 -m pylint topi/python/topi --rcfile=$(ROOTDIR)/tests/lint/pylintrc
        python3 -m pylint vta/python/vta --rcfile=$(ROOTDIR)/tests/lint/pylintrc
 
 jnilint:
index f135c97..a5eacb0 100644 (file)
--- a/apps/android_camera/app/src/main/jni/Android.mk
+++ b/apps/android_camera/app/src/main/jni/Android.mk
@@ -39,8 +39,6 @@ LOCAL_LDFLAGS := -L$(SYSROOT)/usr/lib/ -llog
 LOCAL_C_INCLUDES := $(ROOT_PATH)/include \
                     $(ROOT_PATH)/3rdparty/dlpack/include \
                     $(ROOT_PATH)/3rdparty/dmlc-core/include \
-                    $(ROOT_PATH)/3rdparty/HalideIR/src \
-                    $(ROOT_PATH)/topi/include
 
 LOCAL_MODULE = tvm4j_runtime_packed
 
index 58f82f9..1b06a6b 100644 (file)
--- a/apps/android_deploy/app/src/main/jni/Android.mk
+++ b/apps/android_deploy/app/src/main/jni/Android.mk
@@ -38,8 +38,7 @@ LOCAL_LDFLAGS := -L$(SYSROOT)/usr/lib/ -llog
 
 LOCAL_C_INCLUDES := $(ROOT_PATH)/include \
                     $(ROOT_PATH)/3rdparty/dlpack/include \
-                    $(ROOT_PATH)/3rdparty/dmlc-core/include \
-                    $(ROOT_PATH)/topi/include
+                    $(ROOT_PATH)/3rdparty/dmlc-core/include
 
 LOCAL_MODULE = tvm4j_runtime_packed
 
index 58f82f9..1b06a6b 100644 (file)
--- a/apps/android_rpc/app/src/main/jni/Android.mk
+++ b/apps/android_rpc/app/src/main/jni/Android.mk
@@ -38,8 +38,7 @@ LOCAL_LDFLAGS := -L$(SYSROOT)/usr/lib/ -llog
 
 LOCAL_C_INCLUDES := $(ROOT_PATH)/include \
                     $(ROOT_PATH)/3rdparty/dlpack/include \
-                    $(ROOT_PATH)/3rdparty/dmlc-core/include \
-                    $(ROOT_PATH)/topi/include
+                    $(ROOT_PATH)/3rdparty/dmlc-core/include
 
 LOCAL_MODULE = tvm4j_runtime_packed
 
index 702dd04..f54aecc 100644 (file)
--- a/apps/sgx/build.rs
+++ b/apps/sgx/build.rs
@@ -35,9 +35,7 @@ fn main() {
             concat!(
                 mf_dir!("/../../python"),
                 ":",
-                mf_dir!("/../../nnvm/python"),
-                ":",
-                mf_dir!("/../../topi/python")
+                mf_dir!("/../../nnvm/python")
             ),
         )
         .output()
similarity index 86%
rename from topi/README.md
rename to apps/topi_recipe/README.md
index 4da5910..06c52c6 100644 (file)
@@ -15,7 +15,7 @@
 <!--- specific language governing permissions and limitations -->
 <!--- under the License. -->
 
-# TOPI: TVM Operator Inventory
+# TOPI Recipe: TVM Operator Optimization Recipes
 
 TOPI is the operator collection library for TVM, intended for sharing the effort of crafting
 and optimizing TVM-generated kernels. The goals:
@@ -24,11 +24,6 @@ and optimizing TVM-generated kernels. The goals:
 - Give common primitives for fused op creation.
 - Provide commonly used schedules for each architecture
 
-## Organization
-- [include](include) C++ library, header only
-- [python](python) python library
-- [recipe](recipe) Recipe collections containing useful operator examples.
-
 ## Guidelines
 - Use numpy-style naming convention for known ops
 - Separate operator declaration from schedule when possible.
@@ -39,10 +34,6 @@ and optimizing tvm generated kernels. The goal:
 - Be data-layout aware: if the layout is not specified in an argument or in the function, assume NCHW by default.
 
 
-## Testcase
-- Add testcases to testout the schedule and dataflow in the TOPI workflow
-- Only do correctness testing without attaching compiler flags and only run it once.
-
 ## Performance Tuning Workflow
 Since TVM is a work in progress, some optimizations might not be perfect.
 One quick approach I find useful is codegen plus manual modification.
similarity index 99%
rename from topi/recipe/broadcast/test_broadcast_map.py
rename to apps/topi_recipe/broadcast/test_broadcast_map.py
@@ -20,7 +20,7 @@ from tvm import te
 from tvm.contrib import nvcc
 import numpy as np
 
-import topi
+from tvm import topi
 
 
 TASK = "reduce_map"
similarity index 95%
rename from topi/recipe/conv/depthwise_conv2d_test.py
rename to apps/topi_recipe/conv/depthwise_conv2d_test.py
index 72e054e..c5f8b07 100644 (file)
@@ -21,9 +21,9 @@ import numpy as np
 from scipy import signal
 from tvm.contrib import nvcc
 
-import topi
-from topi.util import get_const_tuple
-from topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_nchw, schedule_depthwise_conv2d_nhwc
+from tvm import topi
+from tvm.topi.util import get_const_tuple
+from tvm.topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_nchw, schedule_depthwise_conv2d_nhwc
 
 TASK = "depthwise_conv2d"
 USE_MANUAL_CODE = False
@@ -118,7 +118,7 @@ def test_depthwise_conv2d_nchw():
         print("average time cost of 1000 runs (depthwise_conv2d + scale_shift) = %g us" % (tcost_2*1e6))
         print("average time cost of 1000 runs (depthwise_conv2d + scale_shift + relu) = %g us" % (tcost_3*1e6))
         # correctness
-        depthwise_conv2d_scipy = topi.testing.depthwise_conv2d_python_nchw(input_np, filter_np, stride=[stride_h, stride_w], padding=padding)
+        depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nchw(input_np, filter_np, stride=[stride_h, stride_w], padding=padding)
         scale_shift_scipy = np.zeros(shape=get_const_tuple(ScaleShift.shape))
         for c in range(in_channel * channel_multiplier):
             scale_shift_scipy[:,c,:,:] = depthwise_conv2d_scipy[:,c,:,:] * scale_np[c] + shift_np[c]
@@ -207,7 +207,7 @@ def test_depthwise_conv2d_nhwc():
         print("average time cost of 1000 runs (depthwise_conv2d + scale_shift) = %g us" % (tcost_2*1e6))
         print("average time cost of 1000 runs (depthwise_conv2d + scale_shift + relu) = %g us" % (tcost_3*1e6))
         # correctness
-        depthwise_conv2d_scipy = topi.testing.depthwise_conv2d_python_nhwc(input_np, filter_np, stride=[stride_h, stride_w], padding=padding)
+        depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nhwc(input_np, filter_np, stride=[stride_h, stride_w], padding=padding)
         scale_shift_scipy = np.zeros(shape=get_const_tuple(ScaleShift.shape))
         for c in range(in_channel * channel_multiplier):
             scale_shift_scipy[:,:,:,c] = depthwise_conv2d_scipy[:,:,:,c] * scale_np[c] + shift_np[c]
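
The reference implementations used for the correctness checks move with the package: topi.testing.* becomes tvm.topi.testing.*. A small standalone sketch of the helper called above (the shapes are hypothetical; for NCHW the filter layout is (in_channel, channel_multiplier, kh, kw)):

    import numpy as np
    import tvm.topi.testing

    x = np.random.uniform(size=(1, 4, 8, 8)).astype("float32")
    w = np.random.uniform(size=(4, 2, 3, 3)).astype("float32")
    out = tvm.topi.testing.depthwise_conv2d_python_nchw(
        x, w, stride=[1, 1], padding="SAME")
    print(out.shape)  # (1, 8, 8, 8): channels = in_channel * multiplier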
similarity index 95%
rename from topi/recipe/conv/test_conv2d_hwcn_map.py
rename to apps/topi_recipe/conv/test_conv2d_hwcn_map.py
index 35cd477..605044c 100644 (file)
@@ -21,8 +21,8 @@ import scipy.signal
 import tvm
 from tvm import te
 from tvm.contrib import nvcc
-import topi
-from topi.util import get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
 
 TASK = "conv2d_hwcn_map"
 USE_MANUAL_CODE = False
@@ -65,7 +65,7 @@ def test_conv2d_hwcn_map():
 
     a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
     w_np = np.random.uniform(size=get_const_tuple(W.shape)).astype(W.dtype)
-    b_np = topi.testing.conv2d_hwcn_python(a_np, w_np, stride, padding)
+    b_np = tvm.topi.testing.conv2d_hwcn_python(a_np, w_np, stride, padding)
     c_np = np.maximum(b_np, 0)
 
     def check_device(device):
similarity index 99%
rename from topi/recipe/conv/test_conv_int8_arm.py
rename to apps/topi_recipe/conv/test_conv_int8_arm.py
index f0b260e..d4f98b0 100644 (file)
@@ -21,7 +21,7 @@ import logging
 import numpy as np
 import tvm
 from tvm import te
-import topi
+from tvm import topi
 
 logging.basicConfig(stream=sys.stdout, level=logging.INFO)
 LOGGER = logging.getLogger('test_conv_int8_intel')
similarity index 99%
rename from topi/recipe/conv/test_conv_int8_intel.py
rename to apps/topi_recipe/conv/test_conv_int8_intel.py
index 767262d..93b7833 100644 (file)
@@ -21,7 +21,7 @@ import logging
 import numpy as np
 import tvm
 from tvm import te
-import topi
+from tvm import topi
 
 logging.basicConfig(stream=sys.stdout, level=logging.INFO)
 LOGGER = logging.getLogger('test_conv_int8_intel')
similarity index 99%
rename from topi/recipe/gemm/gemm_int8.py
rename to apps/topi_recipe/gemm/gemm_int8.py
index 9d668eb..fd03711 100644 (file)
@@ -21,7 +21,7 @@ import numpy as np
 import tvm
 from tvm import te
 from tvm import autotvm
-from topi.cuda.tensor_intrin import dp4a
+from tvm.topi.cuda.tensor_intrin import dp4a
 
 DO_TUNING = True
 PRETUNED_INDEX = 75333
similarity index 99%
rename from topi/recipe/reduce/test_reduce_map.py
rename to apps/topi_recipe/reduce/test_reduce_map.py
index 5e5caec..b6d0602 100644 (file)
@@ -20,7 +20,7 @@ from tvm import te
 from tvm.contrib import nvcc
 import numpy as np
 
-import topi
+from tvm import topi
 
 
 TASK = "reduce_map"
index 358e0b9..9bdbe0a 100644 (file)
--- a/conda/tvm/build.sh
+++ b/conda/tvm/build.sh
@@ -22,7 +22,3 @@ set -u
 cd python
 $PYTHON setup.py install --single-version-externally-managed --record=/tmp/record.txt
 cd ..
-
-cd topi/python
-$PYTHON setup.py install --single-version-externally-managed --record=/tmp/record.txt
-cd ../..
index 13d1a21..185f741 100644 (file)
--- a/docker/Dockerfile.demo_android
+++ b/docker/Dockerfile.demo_android
@@ -70,5 +70,5 @@ RUN cd /usr && \
     make -j10
 
 # Environment variables
-ENV PYTHONPATH=/usr/tvm/python:/usr/tvm/topi/python:/usr/tvm/vta/python:${PYTHONPATH}
+ENV PYTHONPATH=/usr/tvm/python:/usr/tvm/vta/python:${PYTHONPATH}
 ENV ANDROID_HOME=/opt/android-sdk-linux/
index 01ba9f6..3f08e1d 100644 (file)
--- a/docker/Dockerfile.demo_cpu
+++ b/docker/Dockerfile.demo_cpu
@@ -30,4 +30,4 @@ COPY install/install_tvm_cpu.sh /install/install_tvm_cpu.sh
 RUN bash /install/install_tvm_cpu.sh
 
 # Environment variables
-ENV PYTHONPATH=/usr/tvm/python:/usr/tvm/topi/python:/usr/tvm/vta/python:${PYTHONPATH}
+ENV PYTHONPATH=/usr/tvm/python:/usr/tvm/vta/python:${PYTHONPATH}
index b97150f..489a67d 100644 (file)
--- a/docker/Dockerfile.demo_gpu
+++ b/docker/Dockerfile.demo_gpu
@@ -28,7 +28,7 @@ COPY install/install_tvm_gpu.sh /install/install_tvm_gpu.sh
 RUN bash /install/install_tvm_gpu.sh
 
 # Environment variables
-ENV PYTHONPATH=/usr/tvm/python:/usr/tvm/topi/python:/usr/tvm/vta/python:${PYTHONPATH}
+ENV PYTHONPATH=/usr/tvm/python:/usr/tvm/vta/python:${PYTHONPATH}
 ENV PATH=/usr/local/nvidia/bin:${PATH}
 ENV PATH=/usr/local/cuda/bin:${PATH}
 ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/nvidia/lib64:${LD_LIBRARY_PATH}
index fb2d899..e39ee41 100644 (file)
--- a/docker/Dockerfile.demo_opencl
+++ b/docker/Dockerfile.demo_opencl
@@ -76,6 +76,5 @@ RUN mkdir -p ${TVM_BUILD_DIR} && \
        make -j6
 
 RUN echo "Building Python package"
-ENV PYTHONPATH=${TVM_HOME}/python:${TVM_HOME}/topi/python:${PYTHONPATH}
+ENV PYTHONPATH=${TVM_HOME}/python:${PYTHONPATH}
 RUN cd ${TVM_HOME}/python && python3 setup.py install --user
-RUN cd ${TVM_HOME}/topi/python && python3 setup.py install --user
index 191448f..73bfb12 100755 (executable)
--- a/docker/bash.sh
+++ b/docker/bash.sh
@@ -70,7 +70,7 @@ else
 fi
 
 if [[ "${DOCKER_IMAGE_NAME}" == *"ci"* ]]; then
-    CI_PY_ENV="-e PYTHONPATH=/workspace/python:/workspace/topi/python"
+    CI_PY_ENV="-e PYTHONPATH=/workspace/python"
 else
     CI_PY_ENV=""
 fi
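
With topi/python gone, the Dockerfiles and the CI wrapper above keep a single source-tree entry on PYTHONPATH. A quick sanity check for a source checkout (assuming TVM_HOME points at the repository root, as in the Dockerfiles):

    import os
    import sys

    sys.path.insert(0, os.path.join(os.environ["TVM_HOME"], "python"))

    import tvm.topi  # resolves from the single entry; no topi/python needed
    print(tvm.topi.__name__)  # prints "tvm.topi"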
index d665d20..6eb3ee6 100644 (file)
--- a/docs/Doxyfile
+++ b/docs/Doxyfile
@@ -770,7 +770,7 @@ WARN_LOGFILE           =
 # spaces.
 # Note: If this tag is empty the current directory is searched.
 
-INPUT                  = include/tvm topi/include/topi
+INPUT                  = include/tvm
 
 # This tag can be used to specify the character encoding of the source files
 # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
index bee6e56..bc9ec5f 100644 (file)
--- a/docs/api/python/index.rst
+++ b/docs/api/python/index.rst
@@ -44,5 +44,5 @@ Python API
    micro
    contrib
    graph_runtime
-   vta/index
    topi
+   vta/index
index 09c3318..f62509f 100644 (file)
--- a/docs/api/python/topi.rst
+++ b/docs/api/python/topi.rst
     specific language governing permissions and limitations
     under the License.
 
-topi
-----
-.. automodule:: topi
-
-List of operators
-~~~~~~~~~~~~~~~~~
-
-.. autosummary::
-
-   topi.identity
-   topi.negative
-   topi.floor
-   topi.ceil
-   topi.sign
-   topi.trunc
-   topi.round
-   topi.abs
-   topi.isnan
-   topi.isfinite
-   topi.isinf
-   topi.exp
-   topi.tanh
-   topi.log
-   topi.sqrt
-   topi.rsqrt
-   topi.sigmoid
-   topi.clip
-   topi.cast
-   topi.reinterpret
-   topi.transpose
-   topi.flip
-   topi.reverse_sequence
-   topi.strided_slice
-   topi.expand_dims
-   topi.reshape
-   topi.unravel_index
-   topi.sparse_to_dense
-   topi.squeeze
-   topi.concatenate
-   topi.split
-   topi.take
-   topi.gather
-   topi.gather_nd
-   topi.full
-   topi.full_like
-   topi.nn.relu
-   topi.nn.leaky_relu
-   topi.nn.dilate
-   topi.nn.pool
-   topi.nn.global_pool
-   topi.nn.adaptive_pool
-   topi.nn.upsampling
-   topi.nn.softmax
-   topi.nn.dense
-   topi.nn.batch_matmul
-   topi.nn.log_softmax
-   topi.nn.conv2d_nchw
-   topi.nn.conv2d_hwcn
-   topi.nn.depthwise_conv2d_nchw
-   topi.nn.depthwise_conv2d_nhwc
-   topi.nn.fifo_buffer
-   topi.max
-   topi.sum
-   topi.min
-   topi.argmax
-   topi.argmin
-   topi.prod
-   topi.broadcast_to
-   topi.add
-   topi.subtract
-   topi.multiply
-   topi.divide
-   topi.mod
-   topi.maximum
-   topi.minimum
-   topi.power
-   topi.greater
-   topi.less
-   topi.equal
-   topi.not_equal
-   topi.greater_equal
-   topi.less_equal
-   topi.all
-   topi.any
-   topi.logical_and
-   topi.logical_or
-   topi.logical_not
-   topi.logical_xor
-   topi.arange
-   topi.meshgrid
-   topi.stack
-   topi.repeat
-   topi.tile
-   topi.shape
-   topi.ndarray_size
-   topi.layout_transform
-   topi.image.resize
-   topi.image.crop_and_resize
-   topi.image.dilation2d
-   topi.argsort
-   topi.topk
-   topi.sequence_mask
-   topi.one_hot
-
-
-List of schedules
-~~~~~~~~~~~~~~~~~
-.. autosummary::
-
-   topi.generic.schedule_conv2d_nchw
-   topi.generic.schedule_depthwise_conv2d_nchw
-   topi.generic.schedule_reduce
-   topi.generic.schedule_broadcast
-   topi.generic.schedule_injective
+tvm.topi
+--------
+.. automodule:: tvm.topi
+   :members:
+   :imported-members:
+   :autosummary:
+
+tvm.topi.nn
+~~~~~~~~~~~
 
-topi
-~~~~
-.. autofunction:: topi.negative
-.. autofunction:: topi.identity
-.. autofunction:: topi.floor
-.. autofunction:: topi.ceil
-.. autofunction:: topi.sign
-.. autofunction:: topi.trunc
-.. autofunction:: topi.round
-.. autofunction:: topi.abs
-.. autofunction:: topi.isnan
-.. autofunction:: topi.isfinite
-.. autofunction:: topi.isinf
-.. autofunction:: topi.exp
-.. autofunction:: topi.tanh
-.. autofunction:: topi.log
-.. autofunction:: topi.sqrt
-.. autofunction:: topi.rsqrt
-.. autofunction:: topi.sigmoid
-.. autofunction:: topi.clip
-.. autofunction:: topi.cast
-.. autofunction:: topi.reinterpret
-.. autofunction:: topi.transpose
-.. autofunction:: topi.flip
-.. autofunction:: topi.reverse_sequence
-.. autofunction:: topi.strided_slice
-.. autofunction:: topi.expand_dims
-.. autofunction:: topi.reshape
-.. autofunction:: topi.unravel_index
-.. autofunction:: topi.sparse_to_dense
-.. autofunction:: topi.squeeze
-.. autofunction:: topi.concatenate
-.. autofunction:: topi.split
-.. autofunction:: topi.take
-.. autofunction:: topi.gather
-.. autofunction:: topi.gather_nd
-.. autofunction:: topi.full
-.. autofunction:: topi.full_like
-.. autofunction:: topi.all
-.. autofunction:: topi.any
-.. autofunction:: topi.max
-.. autofunction:: topi.sum
-.. autofunction:: topi.min
-.. autofunction:: topi.prod
-.. autofunction:: topi.broadcast_to
-.. autofunction:: topi.add
-.. autofunction:: topi.subtract
-.. autofunction:: topi.multiply
-.. autofunction:: topi.divide
-.. autofunction:: topi.floor_divide
-.. autofunction:: topi.mod
-.. autofunction:: topi.floor_mod
-.. autofunction:: topi.maximum
-.. autofunction:: topi.minimum
-.. autofunction:: topi.power
-.. autofunction:: topi.greater
-.. autofunction:: topi.less
-.. autofunction:: topi.arange
-.. autofunction:: topi.meshgrid
-.. autofunction:: topi.stack
-.. autofunction:: topi.repeat
-.. autofunction:: topi.tile
-.. autofunction:: topi.shape
-.. autofunction:: topi.ndarray_size
-.. autofunction:: topi.layout_transform
-.. autofunction:: topi.argsort
-.. autofunction:: topi.topk
-.. autofunction:: topi.sequence_mask
-.. autofunction:: topi.one_hot
-.. autofunction:: topi.logical_and
-.. autofunction:: topi.logical_or
-.. autofunction:: topi.logical_not
-.. autofunction:: topi.logical_xor
+.. automodule:: tvm.topi.nn
+   :members:
+   :imported-members:
+   :autosummary:
 
-topi.nn
-~~~~~~~
-.. autofunction:: topi.nn.relu
-.. autofunction:: topi.nn.leaky_relu
-.. autofunction:: topi.nn.dilate
-.. autofunction:: topi.nn.pool
-.. autofunction:: topi.nn.global_pool
-.. autofunction:: topi.nn.upsampling
-.. autofunction:: topi.nn.softmax
-.. autofunction:: topi.nn.dense
-.. autofunction:: topi.nn.batch_matmul
-.. autofunction:: topi.nn.log_softmax
-.. autofunction:: topi.nn.conv2d_nchw
-.. autofunction:: topi.nn.conv2d_hwcn
-.. autofunction:: topi.nn.depthwise_conv2d_nchw
-.. autofunction:: topi.nn.depthwise_conv2d_nhwc
-.. autofunction:: topi.nn.conv3d_ncdhw
-.. autofunction:: topi.nn.conv3d_transpose_ncdhw
-.. autofunction:: topi.nn.fifo_buffer
+tvm.topi.image
+~~~~~~~~~~~~~~
+.. automodule:: tvm.topi.image
+   :members:
+   :imported-members:
+   :autosummary:
 
-topi.image
-~~~~~~~~~~
-.. autofunction:: topi.image.resize
-.. autofunction:: topi.image.crop_and_resize
 
-topi.sparse
-~~~~~~~~~~~
-.. autofunction:: topi.sparse.csrmv
-.. autofunction:: topi.sparse.csrmm
-.. autofunction:: topi.sparse.dense
+tvm.topi.sparse
+~~~~~~~~~~~~~~~
+.. automodule:: tvm.topi.sparse
+   :members:
+   :imported-members:
+   :autosummary:
 
-topi.generic
-~~~~~~~~~~~~
-.. automodule:: topi.generic
 
-.. autofunction:: topi.generic.schedule_conv2d_nchw
-.. autofunction:: topi.generic.schedule_depthwise_conv2d_nchw
-.. autofunction:: topi.generic.schedule_conv3d_ncdhw
-.. autofunction:: topi.generic.schedule_conv3d_transpose_ncdhw
-.. autofunction:: topi.generic.schedule_reduce
-.. autofunction:: topi.generic.schedule_broadcast
-.. autofunction:: topi.generic.schedule_injective
index 73836e9..c03f1b7 100644 (file)
@@ -40,7 +40,6 @@ import sphinx_gallery
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
 sys.path.insert(0, os.path.join(curr_path, '../python/'))
-sys.path.insert(0, os.path.join(curr_path, '../topi/python'))
 sys.path.insert(0, os.path.join(curr_path, '../vta/python'))
 
 # -- General configuration ------------------------------------------------
@@ -54,6 +53,7 @@ github_doc_root = 'https://github.com/apache/incubator-tvm/tree/master/docs/'
 os.environ['TVM_BUILD_DOC'] = '1'
 # Version information.
 import tvm
+from tvm import topi
 from tvm import te
 version = tvm.__version__
 release = tvm.__version__
index 7e0ba37..128ae80 100644 (file)
@@ -121,7 +121,7 @@ If you want to run a single test:
   make
 
   # let python know where to find tvm related libraries
-  export PYTHONPATH=python:topi/python
+  export PYTHONPATH=python
   rm -rf python/tvm/*.pyc python/tvm/*/*.pyc python/tvm/*/*/*.pyc
 
   TVM_FFI=ctypes python -m pytest -v tests/python/unittest/test_pass_storage_rewrite.py
index 7a00339..0a21bb8 100644 (file)
@@ -30,7 +30,7 @@ At the root of the TVM repository, we have following subdirectories that togethe
 - ``src`` - C++ code for operator compilation and deployment runtimes.
 - ``src/relay`` - Implementation of Relay, a new functional IR for deep learning framework.
 - ``python`` - Python frontend that wraps C++ functions and objects implemented in ``src``.
-- ``topi`` - Compute definitions and backend schedules for standard neural network operators.
+- ``src/topi`` - Compute definitions and backend schedules for standard neural network operators.
 
 Using standard Deep Learning terminology, ``src/relay`` is the component that manages a computational graph, and nodes in a graph are compiled and executed using infrastructure implemented in the rest of ``src``. ``python`` provides python bindings for the C++ API and driver code that users can use to execute compilation. Operators corresponding to each node are registered in ``src/relay/op``. Implementations of operators are in ``topi``, and they are coded in either C++ or Python.
 
index 9fe8394..c448cb0 100644 (file)
@@ -335,8 +335,8 @@ these scheduling components to the a `tir::PrimFunc` itself.
    inferbound
    hybrid_script
 
-topi
-----
+tvm/topi
+--------
 While it is possible to construct operators directly via TIR or tensor expressions (TE), doing so for each use case is tedious.
 `topi` (Tensor operator inventory) provides a set of pre-defined operators (in TE or TIR) that follow numpy conventions and appear in common deep learning workloads. We also provide a collection of common schedule templates to obtain performant implementations across different target platforms.
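As a minimal sketch of this workflow under the new namespace (assuming a TVM build from this commit; names and shapes are illustrative, and a plain `te` schedule is used instead of a topi schedule template for brevity):

    import numpy as np
    import tvm
    from tvm import te, topi

    # Compose pre-defined topi operators over tensor-expression placeholders.
    A = te.placeholder((16, 16), name="A")
    B = te.placeholder((16, 16), name="B")
    C = topi.add(A, B)       # broadcast add from tvm.topi
    D = topi.nn.relu(C)      # relu from tvm.topi.nn

    # Schedule and build for CPU, then run on random data.
    s = te.create_schedule(D.op)
    f = tvm.build(s, [A, B, D], target="llvm")
    a = tvm.nd.array(np.random.rand(16, 16).astype("float32"))
    b = tvm.nd.array(np.random.rand(16, 16).astype("float32"))
    d = tvm.nd.array(np.zeros((16, 16), dtype="float32"))
    f(a, b, d)               # d now holds relu(a + b)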
index 51ab887..26aec77 100644 (file)
@@ -51,9 +51,9 @@ Build the Shared Library
 
 Our goal is to build the shared libraries:
 
-- On Linux the target library are `libtvm.so, libtvm_topi.so`
-- On macOS the target library are `libtvm.dylib, libtvm_topi.dylib`
-- On Windows the target library are `libtvm.dll, libtvm_topi.dll`
+- On Linux the target library is `libtvm.so`
+- On macOS the target library is `libtvm.dylib`
+- On Windows the target library is `libtvm.dll`
 
 
 .. code:: bash
@@ -174,7 +174,7 @@ Method 1
    .. code:: bash
 
        export TVM_HOME=/path/to/tvm
-       export PYTHONPATH=$TVM_HOME/python:$TVM_HOME/topi/python:${PYTHONPATH}
+       export PYTHONPATH=$TVM_HOME/python:${PYTHONPATH}
 
 
 Method 2
@@ -188,8 +188,6 @@ Method 2
        #       providing --user flag may trigger error during installation in such case.
        export MACOSX_DEPLOYMENT_TARGET=10.9  # This is required for mac to avoid symbol conflicts with libstdc++
        cd python; python setup.py install --user; cd ..
-       cd topi/python; python setup.py install --user; cd ../..
-
 
 Python dependencies
 ~~~~~~~~~~~~~~~~~~~
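Either method can be sanity-checked with a short Python snippet (a sketch; the printed path depends on your setup) confirming that the operator inventory now ships inside the `tvm` package rather than on a separate `topi/python` path:

    import tvm
    from tvm import topi  # no separate topi package is needed anymore

    print(tvm.__file__)    # resolves under $TVM_HOME/python or site-packages
    print(topi.__name__)   # prints "tvm.topi"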
index 3b93360..7585177 100644 (file)
@@ -267,7 +267,7 @@ Operators
 An operator is a primitive operation, such as :code:`add` or :code:`conv2d`, not defined in the Relay
 language. Operators are declared in the global operator
 registry in C++. Many common operators are backed by TVM's
-Tensor Operator Inventory (`TOPI <https://github.com/apache/incubator-tvm/tree/master/topi>`__).
+Tensor Operator Inventory.
 
 To register an operator a user must provide an implementation
 of the operator, its type, and any other desired metadata.
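As a small illustration (a sketch, not part of this patch), the relay-level `add` below is resolved through the operator registry, and compilation selects a topi compute and schedule for the chosen target:

    import tvm
    from tvm import relay

    x = relay.var("x", shape=(2, 2), dtype="float32")
    y = relay.add(x, x)  # `add` comes from the global operator registry
    mod = tvm.IRModule.from_expr(relay.Function([x], y))

    # Building lowers the op via its registered topi implementation.
    lib = relay.build(mod, target="llvm")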
similarity index 98%
rename from topi/include/topi/broadcast.h
rename to include/tvm/topi/broadcast.h
index 1b36ace..8fabaae 100644 (file)
  * \brief Broadcast op constructions
  * \file topi/broadcast.h
  */
-#ifndef TOPI_BROADCAST_H_
-#define TOPI_BROADCAST_H_
+#ifndef TVM_TOPI_BROADCAST_H_
+#define TVM_TOPI_BROADCAST_H_
 
-#include <topi/detail/broadcast.h>
-#include <topi/detail/constant_utils.h>
-#include <topi/tags.h>
+#include <tvm/topi/detail/broadcast.h>
+#include <tvm/topi/detail/constant_utils.h>
+#include <tvm/topi/tags.h>
 
 #include <algorithm>
 #include <string>
 
+namespace tvm {
 namespace topi {
 
 /*!
@@ -429,5 +430,6 @@ TOPI_DEFINE_BCAST_OP(greater_equal, { return (a >= b); });
 TOPI_DEFINE_BCAST_OP(less_equal, { return (a <= b); });
 
 }  // namespace topi
+}  // namespace tvm
 
-#endif  // TOPI_BROADCAST_H_
+#endif  // TVM_TOPI_BROADCAST_H_
similarity index 93%
rename from topi/include/topi/contrib/cublas.h
rename to include/tvm/topi/contrib/cublas.h
index 30ad525..3032643 100644 (file)
  * \brief External function interface to cuBLAS libraries
  * \file cublas.h
  */
-#ifndef TOPI_CONTRIB_CUBLAS_H_
-#define TOPI_CONTRIB_CUBLAS_H_
+#ifndef TVM_TOPI_CONTRIB_CUBLAS_H_
+#define TVM_TOPI_CONTRIB_CUBLAS_H_
 
-#include <topi/detail/extern.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/detail/extern.h>
 
+namespace tvm {
 namespace topi {
 namespace contrib {
-using namespace tvm;
+
 using namespace tvm::te;
 using namespace topi::detail;
 /*!
@@ -82,5 +83,6 @@ inline Tensor cublas_batch_matmul(const Tensor& lhs, const Tensor& rhs, bool tra
 
 }  // namespace contrib
 }  // namespace topi
+}  // namespace tvm
 
-#endif  // TOPI_CONTRIB_CUBLAS_H_
+#endif  // TVM_TOPI_CONTRIB_CUBLAS_H_
similarity index 90%
rename from topi/include/topi/contrib/rocblas.h
rename to include/tvm/topi/contrib/rocblas.h
index 988c375..a4fa26f 100644 (file)
  * \brief External function interface to rocBLAS libraries
  * \file rocblas.h
  */
-#ifndef TOPI_CONTRIB_ROCBLAS_H_
-#define TOPI_CONTRIB_ROCBLAS_H_
+#ifndef TVM_TOPI_CONTRIB_ROCBLAS_H_
+#define TVM_TOPI_CONTRIB_ROCBLAS_H_
 
 #include <tvm/te/operation.h>
+#include <tvm/topi/detail/extern.h>
 
-#include "topi/detail/extern.h"
-
+namespace tvm {
 namespace topi {
 namespace contrib {
-using namespace tvm;
+
 using namespace tvm::te;
 /*!
  * \brief Create an op that multiplies lhs and rhs with rocBLAS
@@ -57,5 +57,6 @@ inline Tensor rocblas_matmul(const Tensor& lhs, const Tensor& rhs, bool transa,
 
 }  // namespace contrib
 }  // namespace topi
+}  // namespace tvm
 
-#endif  // TOPI_CONTRIB_ROCBLAS_H_
+#endif  // TVM_TOPI_CONTRIB_ROCBLAS_H_
similarity index 93%
rename from topi/include/topi/cuda/dense.h
rename to include/tvm/topi/cuda/dense.h
index c8ceebf..34af343 100644 (file)
  * \file cuda/dense.h
  * \brief CUDA schedule for dense operation
  */
-#ifndef TOPI_CUDA_DENSE_H_
-#define TOPI_CUDA_DENSE_H_
-
-#include <topi/contrib/cublas.h>
-#include <topi/detail/array_utils.h>
-#include <topi/generic/extern.h>
-#include <topi/nn/dense.h>
-#include <topi/tags.h>
+#ifndef TVM_TOPI_CUDA_DENSE_H_
+#define TVM_TOPI_CUDA_DENSE_H_
+
 #include <tvm/target/generic_func.h>
 #include <tvm/te/operation.h>
 #include <tvm/te/schedule_pass.h>
+#include <tvm/topi/contrib/cublas.h>
+#include <tvm/topi/detail/array_utils.h>
+#include <tvm/topi/generic/extern.h>
+#include <tvm/topi/nn/dense.h>
+#include <tvm/topi/tags.h>
 
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 
 namespace cuda {
@@ -149,4 +150,5 @@ inline Schedule schedule_dense(const Target& target, const Array<Tensor>& outs)
 
 }  // namespace cuda
 }  // namespace topi
-#endif  // TOPI_CUDA_DENSE_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_CUDA_DENSE_H_
similarity index 91%
rename from topi/include/topi/cuda/injective.h
rename to include/tvm/topi/cuda/injective.h
index e7bce05..010fa2c 100644 (file)
  * \file cuda/injective.h
  * \brief CUDA schedule for injective operations
  */
-#ifndef TOPI_CUDA_INJECTIVE_H_
-#define TOPI_CUDA_INJECTIVE_H_
+#ifndef TVM_TOPI_CUDA_INJECTIVE_H_
+#define TVM_TOPI_CUDA_INJECTIVE_H_
 
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
 #include <tvm/target/generic_func.h>
 #include <tvm/te/operation.h>
 #include <tvm/te/schedule_pass.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
 
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 
 namespace cuda {
@@ -78,4 +79,5 @@ inline Schedule schedule_injective(const Target& target, const Array<Tensor>& ou
 
 }  // namespace cuda
 }  // namespace topi
-#endif  // TOPI_CUDA_INJECTIVE_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_CUDA_INJECTIVE_H_
similarity index 93%
rename from topi/include/topi/cuda/normalization.h
rename to include/tvm/topi/cuda/normalization.h
index f8f498e..270b6af 100644 (file)
  * \file cuda/normalization.h
  * \brief CUDA schedule for LRN and l2 normalization operations
  */
-#ifndef TOPI_CUDA_NORMALIZATION_H_
-#define TOPI_CUDA_NORMALIZATION_H_
+#ifndef TVM_TOPI_CUDA_NORMALIZATION_H_
+#define TVM_TOPI_CUDA_NORMALIZATION_H_
 
-#include <topi/tags.h>
 #include <tvm/target/generic_func.h>
 #include <tvm/te/operation.h>
 #include <tvm/te/schedule_pass.h>
+#include <tvm/topi/tags.h>
 
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 namespace cuda {
 /*!
@@ -70,4 +71,5 @@ inline Schedule schedule_lrn(const Array<Tensor>& outs) {
 
 }  // namespace cuda
 }  // namespace topi
-#endif  // TOPI_CUDA_NORMALIZATION_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_CUDA_NORMALIZATION_H_
similarity index 95%
rename from topi/include/topi/cuda/pooling.h
rename to include/tvm/topi/cuda/pooling.h
index 7e8f55d..0bb9df4 100644 (file)
  * \file cuda/pooling.h
  * \brief CUDA schedule for pooling operations
  */
-#ifndef TOPI_CUDA_POOLING_H_
-#define TOPI_CUDA_POOLING_H_
+#ifndef TVM_TOPI_CUDA_POOLING_H_
+#define TVM_TOPI_CUDA_POOLING_H_
 
-#include <topi/detail/array_utils.h>
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
 #include <tvm/target/generic_func.h>
 #include <tvm/te/operation.h>
 #include <tvm/te/schedule_pass.h>
+#include <tvm/topi/detail/array_utils.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
 
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 
 namespace cuda {
@@ -182,4 +183,5 @@ inline Schedule schedule_global_pool(const Target& target, const Array<Tensor>&
 
 }  // namespace cuda
 }  // namespace topi
-#endif  // TOPI_CUDA_POOLING_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_CUDA_POOLING_H_
similarity index 96%
rename from topi/include/topi/cuda/reduction.h
rename to include/tvm/topi/cuda/reduction.h
index 377b922..18d4484 100644 (file)
  * \file cuda/reduction.h
  * \brief CUDA schedule for reduction operations
  */
-#ifndef TOPI_CUDA_REDUCTION_H_
-#define TOPI_CUDA_REDUCTION_H_
+#ifndef TVM_TOPI_CUDA_REDUCTION_H_
+#define TVM_TOPI_CUDA_REDUCTION_H_
 
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
 #include <tvm/target/generic_func.h>
 #include <tvm/te/operation.h>
 #include <tvm/te/schedule_pass.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
 
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 
 namespace cuda {
@@ -194,4 +195,5 @@ Schedule schedule_reduce(const Target& target, Array<Tensor> outs) {
 
 }  // namespace cuda
 }  // namespace topi
-#endif  // TOPI_CUDA_REDUCTION_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_CUDA_REDUCTION_H_
similarity index 93%
rename from topi/include/topi/cuda/softmax.h
rename to include/tvm/topi/cuda/softmax.h
index a3aa857..19613cb 100644 (file)
  * \file cuda/softmax.h
  * \brief CUDA schedule for softmax operations
  */
-#ifndef TOPI_CUDA_SOFTMAX_H_
-#define TOPI_CUDA_SOFTMAX_H_
+#ifndef TVM_TOPI_CUDA_SOFTMAX_H_
+#define TVM_TOPI_CUDA_SOFTMAX_H_
 
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
 #include <tvm/target/generic_func.h>
 #include <tvm/te/operation.h>
 #include <tvm/te/schedule_pass.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
 
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 
 namespace cuda {
@@ -98,4 +99,5 @@ inline Schedule schedule_softmax(const Target& target, const Array<Tensor>& outs
 
 }  // namespace cuda
 }  // namespace topi
-#endif  // TOPI_CUDA_SOFTMAX_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_CUDA_SOFTMAX_H_
similarity index 89%
rename from topi/include/topi/detail/array_utils.h
rename to include/tvm/topi/detail/array_utils.h
index d720472..89c9856 100644 (file)
  * \file array_utils.h
  * \brief Utility functions for handling arrays
  */
-#ifndef TOPI_DETAIL_ARRAY_UTILS_H_
-#define TOPI_DETAIL_ARRAY_UTILS_H_
+#ifndef TVM_TOPI_DETAIL_ARRAY_UTILS_H_
+#define TVM_TOPI_DETAIL_ARRAY_UTILS_H_
 
 #include <tvm/te/operation.h>
 
+namespace tvm {
 namespace topi {
 namespace detail {
-using namespace tvm;
+
 using namespace tvm::te;
 
 /*!
@@ -51,4 +52,5 @@ inline bool contains(Array<T> array, T item) {
 
 }  // namespace detail
 }  // namespace topi
-#endif  // TOPI_DETAIL_ARRAY_UTILS_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_DETAIL_ARRAY_UTILS_H_
similarity index 96%
rename from topi/include/topi/detail/broadcast.h
rename to include/tvm/topi/detail/broadcast.h
index ca30293..6bdebbd 100644 (file)
  * \brief Detail broadcast.
  * \file topi/detail/broadcast.h
  */
-#ifndef TOPI_DETAIL_BROADCAST_H_
-#define TOPI_DETAIL_BROADCAST_H_
+#ifndef TVM_TOPI_DETAIL_BROADCAST_H_
+#define TVM_TOPI_DETAIL_BROADCAST_H_
 
-#include <topi/detail/constant_utils.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/detail/constant_utils.h>
 
 #include <algorithm>
 #include <deque>
 #include <string>
 
+namespace tvm {
 namespace topi {
 namespace detail {
 
@@ -136,5 +137,6 @@ inline tvm::te::Tensor WithBroadcast(FBinaryExpr op, const tvm::te::Tensor& A,
 
 }  // namespace detail
 }  // namespace topi
+}  // namespace tvm
 
-#endif  // TOPI_DETAIL_BROADCAST_H_
+#endif  // TVM_TOPI_DETAIL_BROADCAST_H_
similarity index 95%
rename from topi/include/topi/detail/constant_utils.h
rename to include/tvm/topi/detail/constant_utils.h
index 9bd1251..03317c2 100644 (file)
@@ -21,8 +21,8 @@
  * \file constant_utils.h
  * \brief Utility functions for handling constants in TVM expressions
  */
-#ifndef TOPI_DETAIL_CONSTANT_UTILS_H_
-#define TOPI_DETAIL_CONSTANT_UTILS_H_
+#ifndef TVM_TOPI_DETAIL_CONSTANT_UTILS_H_
+#define TVM_TOPI_DETAIL_CONSTANT_UTILS_H_
 
 #include <tvm/arith/analyzer.h>
 #include <tvm/te/operation.h>
 #include <string>
 #include <vector>
 
+namespace tvm {
 namespace topi {
 namespace detail {
-using namespace tvm;
+
 using namespace tvm::te;
 
 /*!
@@ -122,4 +123,5 @@ inline bool EqualCheck(PrimExpr lhs, PrimExpr rhs) {
 
 }  // namespace detail
 }  // namespace topi
-#endif  // TOPI_DETAIL_CONSTANT_UTILS_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_DETAIL_CONSTANT_UTILS_H_
similarity index 97%
rename from topi/include/topi/detail/extern.h
rename to include/tvm/topi/detail/extern.h
index 5349818..48c3e18 100644 (file)
@@ -21,8 +21,8 @@
  * \file detail/extern.h
  * \brief Helpers for using external functions
  */
-#ifndef TOPI_DETAIL_EXTERN_H_
-#define TOPI_DETAIL_EXTERN_H_
+#ifndef TVM_TOPI_DETAIL_EXTERN_H_
+#define TVM_TOPI_DETAIL_EXTERN_H_
 
 #include <tvm/te/operation.h>
 #include <tvm/tir/builtin.h>
 #include <string>
 #include <vector>
 
+namespace tvm {
 namespace topi {
 namespace detail {
-using namespace tvm;
+
 using namespace tvm::te;
 
 /*!
@@ -145,4 +146,5 @@ inline PrimExpr call_packed(Array<PrimExpr> args) {
 
 }  // namespace detail
 }  // namespace topi
-#endif  // TOPI_DETAIL_EXTERN_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_DETAIL_EXTERN_H_
similarity index 90%
rename from topi/include/topi/detail/fuse.h
rename to include/tvm/topi/detail/fuse.h
index 90c1c20..7305cce 100644 (file)
  * \file fuse.h
  * \brief Fuse operation
  */
-#ifndef TOPI_DETAIL_FUSE_H_
-#define TOPI_DETAIL_FUSE_H_
+#ifndef TVM_TOPI_DETAIL_FUSE_H_
+#define TVM_TOPI_DETAIL_FUSE_H_
 
 #include <tvm/te/operation.h>
 
+namespace tvm {
 namespace topi {
 namespace detail {
-using namespace tvm;
+
 using namespace tvm::te;
 
 /*!
@@ -47,4 +48,5 @@ inline IterVar Fuse(Stage stage, const Array<IterVar>& args) {
 
 }  // namespace detail
 }  // namespace topi
-#endif  // TOPI_DETAIL_FUSE_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_DETAIL_FUSE_H_
similarity index 91%
rename from topi/include/topi/detail/pad_utils.h
rename to include/tvm/topi/detail/pad_utils.h
index 7c416ec..96eb49a 100644 (file)
@@ -21,8 +21,8 @@
  * \file pad_utils.h
  * \brief Padding helpers
  */
-#ifndef TOPI_DETAIL_PAD_UTILS_H_
-#define TOPI_DETAIL_PAD_UTILS_H_
+#ifndef TVM_TOPI_DETAIL_PAD_UTILS_H_
+#define TVM_TOPI_DETAIL_PAD_UTILS_H_
 
 #include <tvm/te/operation.h>
 #include <tvm/tir/expr.h>
 
 #include <vector>
 
+namespace tvm {
 namespace topi {
 namespace detail {
-using namespace tvm;
+
 using namespace tvm::te;
 
 /*!
@@ -56,4 +57,5 @@ inline Array<PrimExpr> GetPadTuple(PrimExpr pad_h, PrimExpr pad_w) {
 
 }  // namespace detail
 }  // namespace topi
-#endif  // TOPI_DETAIL_PAD_UTILS_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_DETAIL_PAD_UTILS_H_
similarity index 92%
rename from topi/include/topi/detail/ravel_unravel.h
rename to include/tvm/topi/detail/ravel_unravel.h
index c87f2c9..fc77509 100644 (file)
  * \file ravel_unravel.h
  * \brief Index ravel and unravel operations
  */
-#ifndef TOPI_DETAIL_RAVEL_UNRAVEL_H_
-#define TOPI_DETAIL_RAVEL_UNRAVEL_H_
+#ifndef TVM_TOPI_DETAIL_RAVEL_UNRAVEL_H_
+#define TVM_TOPI_DETAIL_RAVEL_UNRAVEL_H_
 
 #include <tvm/te/operation.h>
 
 #include <vector>
 
+namespace tvm {
 namespace topi {
 namespace detail {
-using namespace tvm;
+
 using namespace tvm::te;
 
 /*!
@@ -76,4 +77,5 @@ inline Array<PrimExpr> UnravelIndex(PrimExpr idx, Array<PrimExpr> shape) {
 
 }  // namespace detail
 }  // namespace topi
-#endif  // TOPI_DETAIL_RAVEL_UNRAVEL_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_DETAIL_RAVEL_UNRAVEL_H_
similarity index 94%
rename from topi/include/topi/detail/tensor_utils.h
rename to include/tvm/topi/detail/tensor_utils.h
index d144c75..7004c35 100644 (file)
  * \file tensor_utils.h
  * \brief Utility functions for handling tensors
  */
-#ifndef TOPI_DETAIL_TENSOR_UTILS_H_
-#define TOPI_DETAIL_TENSOR_UTILS_H_
+#ifndef TVM_TOPI_DETAIL_TENSOR_UTILS_H_
+#define TVM_TOPI_DETAIL_TENSOR_UTILS_H_
 
 #include <tvm/te/operation.h>
 
+namespace tvm {
 namespace topi {
 namespace detail {
-using namespace tvm;
+
 using namespace tvm::te;
 
 /*!
@@ -90,4 +91,5 @@ inline PrimExpr bilinear_sample_nchw(const Tensor& input, const Array<PrimExpr>&
 
 }  // namespace detail
 }  // namespace topi
-#endif  // TOPI_DETAIL_TENSOR_UTILS_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_DETAIL_TENSOR_UTILS_H_
similarity index 99%
rename from topi/include/topi/elemwise.h
rename to include/tvm/topi/elemwise.h
index 9b418d0..f537c9c 100644 (file)
  * \file elemwise.h
  * \brief Elementwise op constructions
  */
-#ifndef TOPI_ELEMWISE_H_
-#define TOPI_ELEMWISE_H_
+#ifndef TVM_TOPI_ELEMWISE_H_
+#define TVM_TOPI_ELEMWISE_H_
 
-#include <topi/tags.h>
 #include <tvm/tir/builtin.h>
 #include <tvm/tir/expr.h>
+#include <tvm/topi/tags.h>
 
 #include <algorithm>
 #include <string>
 
 #include "broadcast.h"
 
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 
 // Unary intrinsic operators
@@ -525,4 +526,5 @@ inline Tensor fast_erf(const Tensor& x, std::string name = "T_fast_erf",
 }
 
 }  // namespace topi
-#endif  // TOPI_ELEMWISE_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_ELEMWISE_H_
similarity index 91%
rename from topi/include/topi/generic/default.h
rename to include/tvm/topi/generic/default.h
index 403b943..752b6ad 100644 (file)
  * \file generic/default.h
  * \brief Generic default schedule
  */
-#ifndef TOPI_GENERIC_DEFAULT_H_
-#define TOPI_GENERIC_DEFAULT_H_
+#ifndef TVM_TOPI_GENERIC_DEFAULT_H_
+#define TVM_TOPI_GENERIC_DEFAULT_H_
 
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
 #include <tvm/target/generic_func.h>
 #include <tvm/te/operation.h>
 #include <tvm/te/schedule_pass.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
 
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 
 namespace generic {
@@ -78,4 +79,5 @@ inline Schedule default_schedule_auto_inline(const Target& target, const Array<T
 
 }  // namespace generic
 }  // namespace topi
-#endif  // TOPI_GENERIC_DEFAULT_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_GENERIC_DEFAULT_H_
similarity index 87%
rename from topi/include/topi/generic/extern.h
rename to include/tvm/topi/generic/extern.h
index 3954ac6..0f1f408 100644 (file)
  * \file generic/extern.h
  * \brief Schedule for extern followed by injective ops
  */
-#ifndef TOPI_GENERIC_EXTERN_H_
-#define TOPI_GENERIC_EXTERN_H_
+#ifndef TVM_TOPI_GENERIC_EXTERN_H_
+#define TVM_TOPI_GENERIC_EXTERN_H_
 
-#include <topi/detail/fuse.h>
-#include <topi/generic/injective.h>
-#include <topi/tags.h>
 #include <tvm/target/generic_func.h>
 #include <tvm/te/operation.h>
 #include <tvm/te/schedule_pass.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/generic/injective.h>
+#include <tvm/topi/tags.h>
 
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 
 namespace generic {
@@ -64,4 +65,5 @@ inline Schedule schedule_extern(const Target& target, const Array<Tensor>& outs)
 
 }  // namespace generic
 }  // namespace topi
-#endif  // TOPI_GENERIC_EXTERN_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_GENERIC_EXTERN_H_
similarity index 90%
rename from topi/include/topi/generic/injective.h
rename to include/tvm/topi/generic/injective.h
index 69962dc..c48c03e 100644 (file)
  * \file generic/injective.h
  * \brief Generic schedule for injective operations
  */
-#ifndef TOPI_GENERIC_INJECTIVE_H_
-#define TOPI_GENERIC_INJECTIVE_H_
+#ifndef TVM_TOPI_GENERIC_INJECTIVE_H_
+#define TVM_TOPI_GENERIC_INJECTIVE_H_
 
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
 #include <tvm/target/generic_func.h>
 #include <tvm/te/operation.h>
 #include <tvm/te/schedule_pass.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
 
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 
 namespace generic {
@@ -72,4 +73,5 @@ inline Schedule schedule_injective(const Target& target, const Array<Tensor>& ou
 
 }  // namespace generic
 }  // namespace topi
-#endif  // TOPI_GENERIC_INJECTIVE_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_GENERIC_INJECTIVE_H_
similarity index 99%
rename from topi/include/topi/nn.h
rename to include/tvm/topi/nn.h
index 2a195b3..17eb0d0 100644 (file)
  * \brief NN op constructions
  * \file topi/nn.h
  */
-#ifndef TOPI_NN_H_
-#define TOPI_NN_H_
+#ifndef TVM_TOPI_NN_H_
+#define TVM_TOPI_NN_H_
 
-#include <topi/detail/constant_utils.h>
-#include <topi/tags.h>
 #include <tvm/arith/analyzer.h>
 #include <tvm/te/operation.h>
 #include <tvm/tir/expr.h>
 #include <tvm/tir/op.h>
+#include <tvm/topi/detail/constant_utils.h>
+#include <tvm/topi/tags.h>
 
 #include <algorithm>
 #include <string>
 
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 
 /*!
@@ -447,4 +448,5 @@ inline tvm::te::Tensor group_conv2d_ngchw(const tvm::te::Tensor& I, const tvm::t
 }
 
 }  // namespace topi
-#endif  // TOPI_NN_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_NN_H_
similarity index 91%
rename from topi/include/topi/nn/batch_matmul.h
rename to include/tvm/topi/nn/batch_matmul.h
index 80525c4..bffddca 100644 (file)
  * \brief Batch matmul op constructions
  * \file nn/batch_matmul.h
  */
-#ifndef TOPI_NN_BATCH_MATMUL_H_
-#define TOPI_NN_BATCH_MATMUL_H_
+#ifndef TVM_TOPI_NN_BATCH_MATMUL_H_
+#define TVM_TOPI_NN_BATCH_MATMUL_H_
 
-#include <topi/tags.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/tags.h>
 
 #include <string>
 
+namespace tvm {
 namespace topi {
 namespace nn {
-using namespace tvm;
+
 using namespace tvm::te;
 
 /*!
@@ -61,5 +62,6 @@ inline tvm::te::Tensor batch_matmul(const tvm::te::Tensor& x, const tvm::te::Ten
 
 }  // namespace nn
 }  // namespace topi
+}  // namespace tvm
 
-#endif  // TOPI_NN_BATCH_MATMUL_H_
+#endif  // TVM_TOPI_NN_BATCH_MATMUL_H_
similarity index 87%
rename from topi/include/topi/nn/bias_add.h
rename to include/tvm/topi/nn/bias_add.h
index 18e95de..03c026c 100644 (file)
  * \brief bias_add op constructions
  * \file nn/bias_add.h
  */
-#ifndef TOPI_NN_BIAS_ADD_H_
-#define TOPI_NN_BIAS_ADD_H_
+#ifndef TVM_TOPI_NN_BIAS_ADD_H_
+#define TVM_TOPI_NN_BIAS_ADD_H_
 
-#include <topi/broadcast.h>
-#include <topi/tags.h>
-#include <topi/transform.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/broadcast.h>
+#include <tvm/topi/tags.h>
+#include <tvm/topi/transform.h>
 
 #include <string>
 
+namespace tvm {
 namespace topi {
 namespace nn {
 
@@ -53,4 +54,5 @@ inline tvm::te::Tensor bias_add(const tvm::te::Tensor& data, const tvm::te::Tens
 }
 }  // namespace nn
 }  // namespace topi
-#endif  // TOPI_NN_BIAS_ADD_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_NN_BIAS_ADD_H_
similarity index 95%
rename from topi/include/topi/nn/bnn.h
rename to include/tvm/topi/nn/bnn.h
index c0626cd..f729508 100644 (file)
  * \brief Binary op constructions
  * \file nn/bnn.h
  */
-#ifndef TOPI_NN_BNN_H_
-#define TOPI_NN_BNN_H_
+#ifndef TVM_TOPI_NN_BNN_H_
+#define TVM_TOPI_NN_BNN_H_
 
-#include <topi/detail/constant_utils.h>
-#include <topi/tags.h>
 #include <tvm/arith/analyzer.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/detail/constant_utils.h>
+#include <tvm/topi/tags.h>
 
 #include <string>
 
+namespace tvm {
 namespace topi {
 namespace nn {
-using namespace tvm;
+
 using namespace tvm::te;
 
 /*!
@@ -120,4 +121,5 @@ inline tvm::te::Tensor binary_dense(const tvm::te::Tensor& data, const tvm::te::
 
 }  // namespace nn
 }  // namespace topi
-#endif  // TOPI_NN_BNN_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_NN_BNN_H_
similarity index 93%
rename from topi/include/topi/nn/dense.h
rename to include/tvm/topi/nn/dense.h
index 4ee36c2..ad18cb0 100644 (file)
  * \brief Dense op constructions
  * \file nn/dense.h
  */
-#ifndef TOPI_NN_DENSE_H_
-#define TOPI_NN_DENSE_H_
+#ifndef TVM_TOPI_NN_DENSE_H_
+#define TVM_TOPI_NN_DENSE_H_
 
-#include <topi/tags.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/tags.h>
 
 #include <string>
 
+namespace tvm {
 namespace topi {
 namespace nn {
-using namespace tvm;
+
 using namespace tvm::te;
 
 /*!
@@ -76,4 +77,5 @@ inline tvm::te::Tensor dense(const tvm::te::Tensor& data, const tvm::te::Tensor&
 
 }  // namespace nn
 }  // namespace topi
-#endif  // TOPI_NN_DENSE_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_NN_DENSE_H_
similarity index 95%
rename from topi/include/topi/nn/dilate.h
rename to include/tvm/topi/nn/dilate.h
index 0d3ab89..a021402 100644 (file)
  * \brief Dilate op constructions
  * \file nn/dilate.h
  */
-#ifndef TOPI_NN_DILATE_H_
-#define TOPI_NN_DILATE_H_
+#ifndef TVM_TOPI_NN_DILATE_H_
+#define TVM_TOPI_NN_DILATE_H_
 
-#include <topi/tags.h>
 #include <tvm/arith/analyzer.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/tags.h>
 
 #include <string>
 
+namespace tvm {
 namespace topi {
 namespace nn {
-using namespace tvm;
+
 using namespace tvm::te;
 
 /*!
@@ -102,4 +103,5 @@ inline Tensor dilate(const Tensor& x, Array<PrimExpr> strides, std::string name
 
 }  // namespace nn
 }  // namespace topi
-#endif  // TOPI_NN_DILATE_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_NN_DILATE_H_
similarity index 91%
rename from topi/include/topi/nn/flatten.h
rename to include/tvm/topi/nn/flatten.h
index 1ac5de4..cd96d30 100644 (file)
  * \brief Flatten op constructions
  * \file nn/flatten.h
  */
-#ifndef TOPI_NN_FLATTEN_H_
-#define TOPI_NN_FLATTEN_H_
+#ifndef TVM_TOPI_NN_FLATTEN_H_
+#define TVM_TOPI_NN_FLATTEN_H_
 
-#include <topi/detail/constant_utils.h>
-#include <topi/tags.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/detail/constant_utils.h>
+#include <tvm/topi/tags.h>
 
 #include <string>
 #include <vector>
 
+namespace tvm {
 namespace topi {
 namespace nn {
-using namespace tvm;
+
 using namespace tvm::te;
 
 /*!
@@ -79,4 +80,5 @@ inline Tensor flatten(const Tensor& x, std::string name = "tensor", std::string
 
 }  // namespace nn
 }  // namespace topi
-#endif  // TOPI_NN_FLATTEN_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_NN_FLATTEN_H_
similarity index 93%
rename from topi/include/topi/nn/local_response_norm.h
rename to include/tvm/topi/nn/local_response_norm.h
index 4e8dfd9..0170c50 100644 (file)
  * \brief local response normalization op constructions
  * \file nn/local_response_norm.h
  */
-#ifndef TOPI_NN_LOCAL_RESPONSE_NORM_H_
-#define TOPI_NN_LOCAL_RESPONSE_NORM_H_
+#ifndef TVM_TOPI_NN_LOCAL_RESPONSE_NORM_H_
+#define TVM_TOPI_NN_LOCAL_RESPONSE_NORM_H_
 
-#include <topi/tags.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/tags.h>
 
 #include <string>
 
+namespace tvm {
 namespace topi {
 namespace nn {
-using namespace tvm;
+
 using namespace tvm::te;
 
 /*!
@@ -78,4 +79,5 @@ inline Tensor lrn(const Tensor& data, int size, int axis = 1, float alpha = 0.00
 }
 }  // namespace nn
 }  // namespace topi
-#endif  // TOPI_NN_LOCAL_RESPONSE_NORM_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_NN_LOCAL_RESPONSE_NORM_H_
similarity index 93%
rename from topi/include/topi/nn/mapping.h
rename to include/tvm/topi/nn/mapping.h
index 2bf3314..d6a8716 100644 (file)
  * \brief Mapping op constructions
  * \file nn/mapping.h
  */
-#ifndef TOPI_NN_MAPPING_H_
-#define TOPI_NN_MAPPING_H_
+#ifndef TVM_TOPI_NN_MAPPING_H_
+#define TVM_TOPI_NN_MAPPING_H_
 
-#include <topi/tags.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/tags.h>
 
 #include <string>
 
+namespace tvm {
 namespace topi {
 namespace nn {
-using namespace tvm;
+
 using namespace tvm::te;
 
 /*!
@@ -72,4 +73,5 @@ inline Tensor scale_shift_nhwc(const Tensor& x, const Tensor& scale, const Tenso
 
 }  // namespace nn
 }  // namespace topi
-#endif  // TOPI_NN_MAPPING_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_NN_MAPPING_H_
similarity index 99%
rename from topi/include/topi/nn/pooling.h
rename to include/tvm/topi/nn/pooling.h
index f6435cd..b6852ff 100644 (file)
  * \brief Pooling op constructions
  * \file nn/pooling.h
  */
-#ifndef TOPI_NN_POOLING_H_
-#define TOPI_NN_POOLING_H_
+#ifndef TVM_TOPI_NN_POOLING_H_
+#define TVM_TOPI_NN_POOLING_H_
 
-#include <topi/detail/pad_utils.h>
-#include <topi/nn.h>
-#include <topi/reduction.h>
-#include <topi/tags.h>
 #include <tvm/arith/analyzer.h>
+#include <tvm/topi/detail/pad_utils.h>
+#include <tvm/topi/nn.h>
+#include <tvm/topi/reduction.h>
+#include <tvm/topi/tags.h>
 
 #include <algorithm>
 #include <string>
 #include <vector>
 
+namespace tvm {
 namespace topi {
 namespace nn {
-using namespace tvm;
+
 using namespace tvm::te;
 
 /*! \brief Pooling type */
@@ -843,4 +844,5 @@ inline Tensor pool3d(const Tensor& x, const Array<PrimExpr>& kernel_size,
 
 }  // namespace nn
 }  // namespace topi
-#endif  // TOPI_NN_POOLING_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_NN_POOLING_H_
similarity index 96%
rename from topi/include/topi/nn/softmax.h
rename to include/tvm/topi/nn/softmax.h
index 5ebeb6b..2e94f91 100644 (file)
  * \brief Softmax op constructions
  * \file nn/softmax.h
  */
-#ifndef TOPI_NN_SOFTMAX_H_
-#define TOPI_NN_SOFTMAX_H_
+#ifndef TVM_TOPI_NN_SOFTMAX_H_
+#define TVM_TOPI_NN_SOFTMAX_H_
 
-#include <topi/reduction.h>
-#include <topi/tags.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/reduction.h>
+#include <tvm/topi/tags.h>
 
 #include <algorithm>
 #include <string>
 
+namespace tvm {
 namespace topi {
 namespace nn {
-using namespace tvm;
+
 using namespace tvm::te;
 
 /*!
@@ -143,4 +144,5 @@ inline Tensor log_softmax(const Tensor& x, std::string name = "tensor",
 
 }  // namespace nn
 }  // namespace topi
-#endif  // TOPI_NN_SOFTMAX_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_NN_SOFTMAX_H_
similarity index 98%
rename from topi/include/topi/reduction.h
rename to include/tvm/topi/reduction.h
index 8555500..8a8a947 100644 (file)
  * \file topi/reduction.h
  * \brief Reduction op constructors
  */
-#ifndef TOPI_REDUCTION_H_
-#define TOPI_REDUCTION_H_
-
-#include <topi/broadcast.h>
-#include <topi/detail/constant_utils.h>
-#include <topi/detail/ravel_unravel.h>
-#include <topi/elemwise.h>
-#include <topi/tags.h>
-#include <topi/transform.h>
+#ifndef TVM_TOPI_REDUCTION_H_
+#define TVM_TOPI_REDUCTION_H_
+
 #include <tvm/te/operation.h>
+#include <tvm/topi/broadcast.h>
+#include <tvm/topi/detail/constant_utils.h>
+#include <tvm/topi/detail/ravel_unravel.h>
+#include <tvm/topi/elemwise.h>
+#include <tvm/topi/tags.h>
+#include <tvm/topi/transform.h>
 
 #include <algorithm>
 #include <iterator>
 #include <string>
 #include <vector>
 
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 
 /*! \brief The operation to use for CommReduce */
@@ -510,4 +511,5 @@ inline Tensor prod(const Tensor& data, const Array<Integer>& axis, bool keepdims
 }
 
 }  // namespace topi
-#endif  // TOPI_REDUCTION_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_REDUCTION_H_
similarity index 89%
rename from topi/include/topi/rocm/dense.h
rename to include/tvm/topi/rocm/dense.h
index e2e04b4..e279152 100644 (file)
  * \file rocm/dense.h
  * \brief rocm schedule for dense operation
  */
-#ifndef TOPI_ROCM_DENSE_H_
-#define TOPI_ROCM_DENSE_H_
+#ifndef TVM_TOPI_ROCM_DENSE_H_
+#define TVM_TOPI_ROCM_DENSE_H_
 
-#include <topi/tags.h>
 #include <tvm/target/generic_func.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/contrib/rocblas.h>
+#include <tvm/topi/cuda/dense.h>
+#include <tvm/topi/detail/array_utils.h>
+#include <tvm/topi/generic/extern.h>
+#include <tvm/topi/nn/dense.h>
+#include <tvm/topi/tags.h>
 
-#include "topi/contrib/rocblas.h"
-#include "topi/cuda/dense.h"
-#include "topi/detail/array_utils.h"
-#include "topi/generic/extern.h"
-#include "topi/nn/dense.h"
-
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 
 namespace rocm {
@@ -95,4 +95,5 @@ inline Schedule schedule_dense(const Target& target, const Array<Tensor>& outs)
 
 }  // namespace rocm
 }  // namespace topi
-#endif  // TOPI_ROCM_DENSE_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_ROCM_DENSE_H_
similarity index 88%
rename from topi/include/topi/rocm/injective.h
rename to include/tvm/topi/rocm/injective.h
index e7415bf..295d930 100644 (file)
  * \file rocm/injective.h
  * \brief rocm schedule for injective operations
  */
-#ifndef TOPI_ROCM_INJECTIVE_H_
-#define TOPI_ROCM_INJECTIVE_H_
+#ifndef TVM_TOPI_ROCM_INJECTIVE_H_
+#define TVM_TOPI_ROCM_INJECTIVE_H_
 
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
 #include <tvm/target/generic_func.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/cuda/injective.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
 
-#include "topi/cuda/injective.h"
-
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 
 namespace rocm {
@@ -63,4 +63,5 @@ inline Schedule schedule_injective(const Target& target, const Array<Tensor>& ou
 
 }  // namespace rocm
 }  // namespace topi
-#endif  // TOPI_ROCM_INJECTIVE_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_ROCM_INJECTIVE_H_
similarity index 87%
rename from topi/include/topi/rocm/normalization.h
rename to include/tvm/topi/rocm/normalization.h
index 8328683..2fbb880 100644 (file)
  * \file rocm/normalization.h
  * \brief rocm schedule for LRN and l2 normalization operations
  */
-#ifndef TOPI_ROCM_NORMALIZATION_H_
-#define TOPI_ROCM_NORMALIZATION_H_
+#ifndef TVM_TOPI_ROCM_NORMALIZATION_H_
+#define TVM_TOPI_ROCM_NORMALIZATION_H_
 
-#include <topi/tags.h>
 #include <tvm/target/generic_func.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/tags.h>
 
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 namespace rocm {
 /*!
@@ -41,4 +42,5 @@ inline Schedule schedule_lrn(const Array<Tensor>& outs) { return topi::cuda::sch
 
 }  // namespace rocm
 }  // namespace topi
-#endif  // TOPI_ROCM_NORMALIZATION_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_ROCM_NORMALIZATION_H_
similarity index 86%
rename from topi/include/topi/rocm/pooling.h
rename to include/tvm/topi/rocm/pooling.h
index 0b68a0a..993c32b 100644 (file)
  * \file rocm/pooling.h
  * \brief rocm schedule for pooling operations
  */
-#ifndef TOPI_ROCM_POOLING_H_
-#define TOPI_ROCM_POOLING_H_
+#ifndef TVM_TOPI_ROCM_POOLING_H_
+#define TVM_TOPI_ROCM_POOLING_H_
 
-#include <topi/cuda/pooling.h>
-#include <topi/detail/array_utils.h>
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
 #include <tvm/target/generic_func.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/cuda/pooling.h>
+#include <tvm/topi/detail/array_utils.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
 
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 
 namespace rocm {
@@ -63,4 +64,5 @@ inline Schedule schedule_global_pool(const Target& target, const Array<Tensor>&
 
 }  // namespace rocm
 }  // namespace topi
-#endif  // TOPI_ROCM_POOLING_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_ROCM_POOLING_H_
similarity index 85%
rename from topi/include/topi/rocm/reduction.h
rename to include/tvm/topi/rocm/reduction.h
index 512bf20..7beda17 100644 (file)
  * \file rocm/reduction.h
  * \brief rocm schedule for reduction operations
  */
-#ifndef TOPI_ROCM_REDUCTION_H_
-#define TOPI_ROCM_REDUCTION_H_
+#ifndef TVM_TOPI_ROCM_REDUCTION_H_
+#define TVM_TOPI_ROCM_REDUCTION_H_
 
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
 #include <tvm/target/generic_func.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/cuda/reduction.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
 
-#include "topi/cuda/reduction.h"
-
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 
 namespace rocm {
@@ -50,4 +50,5 @@ Schedule schedule_reduce(const Target& target, Array<Tensor> outs) {
 
 }  // namespace rocm
 }  // namespace topi
-#endif  // TOPI_ROCM_REDUCTION_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_ROCM_REDUCTION_H_
similarity index 85%
rename from topi/include/topi/rocm/softmax.h
rename to include/tvm/topi/rocm/softmax.h
index de05c4c..a2ffd2c 100644 (file)
  * \file rocm/softmax.h
  * \brief ROCM schedule for softmax operations
  */
-#ifndef TOPI_ROCM_SOFTMAX_H_
-#define TOPI_ROCM_SOFTMAX_H_
+#ifndef TVM_TOPI_ROCM_SOFTMAX_H_
+#define TVM_TOPI_ROCM_SOFTMAX_H_
 
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
 #include <tvm/target/generic_func.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/cuda/softmax.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
 
-#include "topi/cuda/softmax.h"
-
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 
 namespace rocm {
@@ -51,4 +51,5 @@ inline Schedule schedule_softmax(const Target& target, const Array<Tensor>& outs
 
 }  // namespace rocm
 }  // namespace topi
-#endif  // TOPI_ROCM_SOFTMAX_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_ROCM_SOFTMAX_H_
similarity index 94%
rename from topi/include/topi/tags.h
rename to include/tvm/topi/tags.h
index 1e9ec44..3b748ca 100644 (file)
  * \brief Tag definitions
  * \file tags.h
  */
-#ifndef TOPI_TAGS_H_
-#define TOPI_TAGS_H_
+#ifndef TVM_TOPI_TAGS_H_
+#define TVM_TOPI_TAGS_H_
 
 #include <string>
 
+namespace tvm {
 namespace topi {
 
 constexpr auto kElementWise = "elemwise";
@@ -52,5 +53,6 @@ inline bool is_injective(std::string tag) {
 }
 
 }  // namespace topi
+}  // namespace tvm
 
-#endif  // TOPI_TAGS_H_
+#endif  // TVM_TOPI_TAGS_H_
similarity index 99%
rename from topi/include/topi/transform.h
rename to include/tvm/topi/transform.h
index 0b339d2..cd19436 100644 (file)
  * \file topi/transform.h
  * \brief Transform op constructors
  */
-#ifndef TOPI_TRANSFORM_H_
-#define TOPI_TRANSFORM_H_
+#ifndef TVM_TOPI_TRANSFORM_H_
+#define TVM_TOPI_TRANSFORM_H_
 
-#include <topi/detail/constant_utils.h>
-#include <topi/detail/ravel_unravel.h>
-#include <topi/detail/tensor_utils.h>
-#include <topi/tags.h>
 #include <tvm/te/operation.h>
 #include <tvm/tir/data_layout.h>
+#include <tvm/topi/detail/constant_utils.h>
+#include <tvm/topi/detail/ravel_unravel.h>
+#include <tvm/topi/detail/tensor_utils.h>
+#include <tvm/topi/tags.h>
 
 #include <algorithm>
 #include <iterator>
@@ -38,8 +38,9 @@
 #include <unordered_set>
 #include <vector>
 
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 using namespace topi::detail;
 
@@ -1508,4 +1509,5 @@ inline Tensor sparse_to_dense(const Tensor& sparse_indices, const Array<Integer>
 }
 
 }  // namespace topi
-#endif  // TOPI_TRANSFORM_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_TRANSFORM_H_
similarity index 92%
rename from topi/include/topi/util.h
rename to include/tvm/topi/util.h
index 133bc85..4e0cdc6 100644 (file)
  * \brief Topi utility function
  * \file topi/util.h
  */
-#ifndef TOPI_UTIL_H_
-#define TOPI_UTIL_H_
+#ifndef TVM_TOPI_UTIL_H_
+#define TVM_TOPI_UTIL_H_
 
 #include <tvm/ir/expr.h>
 #include <tvm/runtime/packed_func.h>
 
+namespace tvm {
 namespace topi {
 
-using namespace tvm;
 using namespace tvm::runtime;
 
 /*! \brief Canonicalize an argument that may be Array<Expr> or int to Array<Expr> */
@@ -43,4 +43,5 @@ inline Array<Integer> ArrayOrInt(TVMArgValue arg) {
   }
 }
 }  // namespace topi
-#endif  // TOPI_UTIL_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_UTIL_H_
similarity index 89%
rename from topi/include/topi/vision/reorg.h
rename to include/tvm/topi/vision/reorg.h
index 5bd79f6..381272b 100644 (file)
  * \brief Reorg op constructions
  * \file vision/reorg.h
  */
-#ifndef TOPI_VISION_REORG_H_
-#define TOPI_VISION_REORG_H_
+#ifndef TVM_TOPI_VISION_REORG_H_
+#define TVM_TOPI_VISION_REORG_H_
 
-#include <topi/detail/constant_utils.h>
-#include <topi/reduction.h>
-#include <topi/tags.h>
-#include <topi/transform.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/detail/constant_utils.h>
+#include <tvm/topi/reduction.h>
+#include <tvm/topi/tags.h>
+#include <tvm/topi/transform.h>
 
 #include <algorithm>
 #include <string>
 
+namespace tvm {
 namespace topi {
 namespace vision {
-using namespace tvm;
+
 using namespace tvm::te;
 
 /*!
@@ -76,4 +77,5 @@ inline Tensor reorg(const Tensor& data, int stride = 1, std::string name = "tens
 }
 }  // namespace vision
 }  // namespace topi
-#endif  // TOPI_VISION_REORG_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_VISION_REORG_H_
similarity index 94%
rename from topi/include/topi/x86/bnn.h
rename to include/tvm/topi/x86/bnn.h
index a59d30d..c8a7235 100644 (file)
  * \file x86/bnn.h
  * \brief x86 schedule for binary operations
  */
-#ifndef TOPI_X86_BNN_H_
-#define TOPI_X86_BNN_H_
+#ifndef TVM_TOPI_X86_BNN_H_
+#define TVM_TOPI_X86_BNN_H_
 
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
 #include <tvm/target/generic_func.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
 
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 
 namespace x86 {
@@ -126,4 +127,5 @@ inline Schedule schedule_binary_dense(const Target& target, const Array<Tensor>&
 
 }  // namespace x86
 }  // namespace topi
-#endif  // TOPI_X86_BNN_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_X86_BNN_H_
similarity index 93%
rename from topi/include/topi/x86/default.h
rename to include/tvm/topi/x86/default.h
index 0733781..9c98560 100644 (file)
  * \file x86/default.h
  * \brief default x86 schedule
  */
-#ifndef TOPI_X86_DEFAULT_H_
-#define TOPI_X86_DEFAULT_H_
+#ifndef TVM_TOPI_X86_DEFAULT_H_
+#define TVM_TOPI_X86_DEFAULT_H_
 
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
 #include <tvm/target/generic_func.h>
 #include <tvm/te/operation.h>
 #include <tvm/te/schedule_pass.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
 
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 
 namespace x86 {
@@ -100,4 +101,5 @@ inline Schedule default_schedule_auto_inline(const Target& target, const Array<T
 
 }  // namespace x86
 }  // namespace topi
-#endif  // TOPI_X86_DEFAULT_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_X86_DEFAULT_H_
similarity index 91%
rename from topi/include/topi/x86/injective.h
rename to include/tvm/topi/x86/injective.h
index 069a971..16eaee6 100644 (file)
  * \file x86/injective.h
  * \brief x86 schedule for injective ops
  */
-#ifndef TOPI_X86_INJECTIVE_H_
-#define TOPI_X86_INJECTIVE_H_
+#ifndef TVM_TOPI_X86_INJECTIVE_H_
+#define TVM_TOPI_X86_INJECTIVE_H_
 
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
 #include <tvm/target/generic_func.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
 
+namespace tvm {
 namespace topi {
-using namespace tvm;
+
 using namespace tvm::te;
 
 namespace x86 {
@@ -80,4 +81,5 @@ inline Schedule schedule_injective(const Target& target, const Array<Tensor>& ou
 
 }  // namespace x86
 }  // namespace topi
-#endif  // TOPI_X86_INJECTIVE_H_
+}  // namespace tvm
+#endif  // TVM_TOPI_X86_INJECTIVE_H_
index 76f92be..fa01860 100644 (file)
@@ -20,7 +20,7 @@ import logging
 from abc import abstractmethod
 
 import numpy as np
-import topi
+from tvm import topi
 
 import tvm
 from tvm import te
index 67ebda4..15d4534 100644 (file)
@@ -118,7 +118,7 @@ def extract_from_multiple_program(mods, params, target, target_host=None, ops=No
     """
     # pylint: disable=import-outside-toplevel
     from tvm import relay
-    import topi
+    from tvm import topi
 
     env = TaskExtractEnv.get()
 
index c3da195..c86d176 100644 (file)
@@ -22,7 +22,7 @@ import numpy as np
 
 import tvm
 from tvm.ir import IRModule
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 
 from .. import expr as _expr
 from .. import function as _function
index 327bcd4..9a353f3 100644 (file)
@@ -23,7 +23,7 @@ import tvm
 from tvm.ir import IRModule
 
 from tvm import relay
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 from .. import analysis
 from .. import expr as _expr
 from .. import function as _function
index a06b0ca..24f1b8b 100644 (file)
@@ -27,7 +27,7 @@ import tvm
 
 from tvm.ir import IRModule
 from tvm.relay.prelude import Prelude, StaticTensorArrayOps, get_tensor_array_shape
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 
 from .. import analysis
 from .. import expr as _expr
index 0eeeb95..015f5ad 100644 (file)
@@ -19,7 +19,7 @@ from __future__ import absolute_import
 
 from tvm.runtime import convert
 from tvm.te.hybrid import script
-from topi.util import get_const_int, get_const_tuple
+from tvm.topi.util import get_const_int, get_const_tuple
 from . import op as _reg
 
 _reg.register_reduce_schedule("argmax")
index 2ca2a01..28336cf 100644 (file)
@@ -18,7 +18,7 @@
 """Backend compiler related feature registration"""
 
 from tvm.te.hybrid import script
-import topi
+from tvm import topi
 
 from .op import register_compute, register_shape_func
 from .op import register_broadcast_schedule, register_injective_schedule
index 3e87f60..aee8603 100644 (file)
@@ -18,8 +18,8 @@
 """Backend compiler related feature registration"""
 from __future__ import absolute_import
 
-from topi.nn.util import get_pad_tuple
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
 
 from ..expr import Tuple, TupleGetItem, const
 from . import nn as _nn
index 4e113f7..a69eb8c 100644 (file)
@@ -21,8 +21,8 @@ import tvm
 from tvm import te
 from tvm.te.hybrid import script
 from tvm.runtime import convert
-import topi
-from topi.util import get_const_int, get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_int, get_const_tuple
 from . import op as _reg
 from . import strategy
 from .op import OpPattern
index dc28359..371e4ad 100644 (file)
@@ -17,7 +17,7 @@
 #pylint: disable=invalid-name, unused-argument, len-as-condition
 """Backend compiler related feature registration for dynamic ops"""
 
-import topi
+from tvm import topi
 
 from ..op import register_shape_func, register_compute
 from ..op import register_broadcast_schedule
index 795844f..2cc3588 100644 (file)
@@ -21,8 +21,8 @@ from __future__ import absolute_import
 from tvm.te.hybrid import script
 from tvm.runtime import convert
 
-import topi
-from topi.util import get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
 from .. import op as reg
 from .. import strategy
 from ..op import OpPattern
index 62889e0..99a6a0f 100644 (file)
@@ -74,8 +74,8 @@ def resize3d(data,
 
     This operator takes data as input and does 3D scaling to the given scale factor.
     In the default case, where the data_layout is `NCDHW`
-    with data of shape (n, c, d, h, w)
-    out will have a shape (n, c, size[0], size[1], size[2])
+    with data of shape `(n, c, d, h, w)`
+    out will have a shape `(n, c, size[0], size[1], size[2])`
 
     method indicates the algorithm to be used while calculating the output
     value, and can be one of ("trilinear", "nearest_neighbor")
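Worked example of the shape rule above (a hypothetical helper, not the topi implementation):

    # NCDHW: batch and channel pass through; size replaces (d, h, w).
    def resize3d_out_shape(data_shape, size):
        n, c, d, h, w = data_shape
        return (n, c, size[0], size[1], size[2])

    assert resize3d_out_shape((1, 3, 8, 16, 16), (16, 32, 32)) == (1, 3, 16, 32, 32)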
index cea592a..2f0966c 100644 (file)
@@ -18,8 +18,8 @@
 """Backend compiler related feature registration"""
 from __future__ import absolute_import
 
-import topi
-from topi.util import get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
 
 from tvm.runtime import convert
 from tvm.te.hybrid import script
index 6ede0be..b2df850 100644 (file)
@@ -1387,7 +1387,7 @@ def prelu(data, alpha, axis=1):
 
     .. math::
 
-        `y = x > 0 ? x : alpha * x`
+        y = x > 0 ? x : alpha * x
 
     Parameters
     ----------
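A numpy rendering of this formula (illustrative only; `alpha` is broadcast along `axis` as in the docstring):

    import numpy as np

    def prelu_ref(x, alpha, axis=1):
        # y = x if x > 0 else alpha * x, with alpha broadcast along `axis`.
        shape = [1] * x.ndim
        shape[axis] = -1
        return np.where(x > 0, x, alpha.reshape(shape) * x)

    x = np.array([[-1.0, 2.0], [3.0, -4.0]])
    alpha = np.array([0.1, 0.2])
    print(prelu_ref(x, alpha))  # [[-0.1  2. ] [ 3.  -0.8]]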
@@ -2423,14 +2423,14 @@ def bitpack(data,
             bit_axis=2,
             pack_type="uint32",
             name="BitPack"):
-    r"""Tensor packing for bitserial operations.
+    """Tensor packing for bitserial operations.
+
     The values along the input tensor's pack_axis are quantized
-    and packed together into the specified pack_type in a new
-    bit axis.
+    and packed together into the specified pack_type in a new bit axis.
 
-    For example, consider bitpacking with data to be a tensor with shape [1, 64, 128, 128],
+    For example, consider bitpacking with data to be a tensor with shape `[1, 64, 128, 128]`,
     pack_axis=1, bit_axis=4, pack_type=uint8, and bits=2. The output in this case will
-    be of shape [1, 8, 128, 128, 2]. The dimension of axis 1 has been reduced by a factor
+    be of shape `[1, 8, 128, 128, 2]`. The dimension of axis 1 has been reduced by a factor
     of 8 since each value is packed into an 8-bit uint8. Axis 4 is now two bitplanes
     representing the quantized value of the incoming data. The output tensor is now
     ready to be used in a bitserial operation.
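The shape arithmetic in that example checks out with a small sketch (a hypothetical helper, not the topi implementation; `pack_bits` is the width of pack_type, e.g. 8 for uint8):

    def bitpack_out_shape(shape, bits, pack_axis, bit_axis, pack_bits):
        out = list(shape)
        out[pack_axis] //= pack_bits  # values along pack_axis fold into one word
        out.insert(bit_axis, bits)    # new axis holding the bitplanes
        return out

    assert bitpack_out_shape([1, 64, 128, 128], bits=2, pack_axis=1,
                             bit_axis=4, pack_bits=8) == [1, 8, 128, 128, 2]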
index 8457965..8143cc5 100644 (file)
@@ -19,7 +19,7 @@
 import re
 import logging
 
-import topi
+from tvm import topi
 from ....target import arm_isa
 from .generic import *
 from .. import op as _op
index a96463f..c975c36 100644 (file)
@@ -17,7 +17,7 @@
 """Definition of bifrost operator strategy."""
 # pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
 import re
-import topi
+from tvm import topi
 from .generic import *
 from .. import op as _op
 
index d626a9d..21c3c83 100644 (file)
@@ -16,7 +16,7 @@
 # under the License.
 """Definition of CUDA/GPU operator strategy."""
 # pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
-import topi
+from tvm import topi
 import tvm
 from tvm.te import SpecializedCondition
 from tvm.contrib import nvcc
index 62c2948..bc54577 100644 (file)
@@ -19,8 +19,8 @@
 import logging
 
 import re
-import topi
-from topi.util import get_const_int, get_const_float, get_const_tuple, get_float_tuple
+from tvm import topi
+from tvm.topi.util import get_const_int, get_const_float, get_const_tuple, get_float_tuple
 from .. import op as _op
 from ....target import generic_func, override_native_generic_func
 
index d41e85f..90495fb 100644 (file)
@@ -16,7 +16,7 @@
 # under the License.
 """Definition of HLS operator strategy."""
 # pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
-import topi
+from tvm import topi
 from .generic import *
 from .. import op as _op
 
index 0ea8d85..568cbff 100644 (file)
@@ -16,7 +16,7 @@
 # under the License.
 """Definition of x86 operator strategy."""
 # pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
-import topi
+from tvm import topi
 from .generic import *
 from .. import op as _op
 
index 5e4a7e5..84af203 100644 (file)
@@ -17,7 +17,7 @@
 """Definition of mali operator strategy."""
 # pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
 import re
-import topi
+from tvm import topi
 from .generic import *
 from .. import op as _op
 
index a80b6ca..e70298a 100644 (file)
@@ -16,7 +16,7 @@
 # under the License.
 """Definition of ROCm operator strategy."""
 # pylint: disable=invalid-name,unused-argument,unused-wildcard-import,wildcard-import
-import topi
+from tvm import topi
 from .generic import *
 from .. import op as _op
 
index b02db41..eb5b5a5 100644 (file)
@@ -19,7 +19,7 @@
 import logging
 
 import re
-import topi
+from tvm import topi
 from tvm.te import SpecializedCondition
 from .generic import *
 from .. import op as _op
index 16468e5..6f5097d 100644 (file)
@@ -16,8 +16,8 @@
 # under the License.
 # pylint: disable=invalid-name, unused-argument
 """Faster R-CNN and Mask R-CNN operations."""
-import topi
-from topi.util import get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
 from .. import op as reg
 from .. import strategy
 from ..op import OpPattern
index f6c4f81..c94cb5a 100644 (file)
@@ -18,7 +18,7 @@
 """Definition of vision ops"""
 from __future__ import absolute_import
 
-import topi
+from tvm import topi
 from tvm.te.hybrid import script
 from .. import op as reg
 from .. import strategy
index 952a864..0bccacd 100644 (file)
@@ -17,7 +17,7 @@
 #pylint: disable=unused-argument,inconsistent-return-statements
 """Internal module for registering attribute for annotation."""
 import warnings
-import topi
+from tvm import topi
 import tvm._ffi
 from tvm.relay.op import op as _reg
 from .. import expr as _expr
index 891d7ba..213a48e 100644 (file)
@@ -14,7 +14,7 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-"""Internal utilities for parsing Python subset to HalideIR"""
+"""Internal utilities for parsing Python subset to TIR"""
 
 import ast
 import inspect
similarity index 97%
rename from topi/python/topi/__init__.py
rename to python/tvm/topi/__init__.py
index f308aa6..c17b6fd 100644 (file)
@@ -24,8 +24,6 @@ for constructing compute declaration as well as optimized schedules.
 Some of the schedule function may have been specially optimized for a
 specific workload.
 """
-from __future__ import absolute_import as _abs
-
 from tvm._ffi.libinfo import __version__
 
 # Ensure C++ schedules get registered first, so python schedules can
similarity index 99%
rename from topi/python/topi/arm_cpu/bitserial_dense.py
rename to python/tvm/topi/arm_cpu/bitserial_dense.py
index beed79d..c7aa567 100644 (file)
@@ -20,7 +20,7 @@ from __future__ import absolute_import as _abs
 import tvm
 from tvm import te
 from tvm import autotvm
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 from .. import tag
 from .bitserial_conv2d import _intrin_popcount
 from ..nn.pad import pad
similarity index 99%
rename from topi/python/topi/arm_cpu/conv2d_gemm.py
rename to python/tvm/topi/arm_cpu/conv2d_gemm.py
index e97de56..c8e1a5a 100644 (file)
@@ -19,7 +19,7 @@
 """GEMM Convolution schedule on ARM"""
 import tvm
 from tvm import te
-from topi import nn
+from tvm.topi import nn
 from ..util import get_const_tuple
 from ..nn.util import get_pad_tuple
 from .tensor_intrin import gemv_quantized, gemv_quantized_impl
@@ -20,8 +20,8 @@
 import tvm
 from tvm import autotvm
 from tvm.autotvm.task import deserialize_args
-from topi.nn.conv2d import conv2d_nchw, conv2d_nhwc
-from topi.util import get_const_tuple, get_const_int, traverse_inline
+from tvm.topi.nn.conv2d import conv2d_nchw, conv2d_nhwc
+from tvm.topi.util import get_const_tuple, get_const_int, traverse_inline
 
 def conv2d_direct(*args, **kwargs):
     """Schedule function for directly-scheduled conv2d."""
@@ -20,9 +20,9 @@
 from tvm import autotvm
 from tvm.autotvm.task import deserialize_args
 from tvm import te
-from topi.util import simplify, traverse_inline
-from topi.nn.pad import pad
-from topi.nn.util import get_pad_tuple
+from tvm.topi.util import simplify, traverse_inline
+from tvm.topi.nn.pad import pad
+from tvm.topi.nn.util import get_pad_tuple
 
 from ..micro_kernel.gemm import (
         intrin_gemm_MxKxN, gemm_MxKxN_impl,
similarity index 94%
rename from topi/python/topi/cpp/cuda.py
rename to python/tvm/topi/cpp/cuda.py
index efc31e8..ce2efa9 100644 (file)
@@ -17,4 +17,4 @@
 """FFI for CUDA TOPI ops and schedules"""
 import tvm._ffi
 
-tvm._ffi._init_api("topi.cuda", "topi.cpp.cuda")
+tvm._ffi._init_api("topi.cuda", "tvm.topi.cpp.cuda")
similarity index 93%
rename from topi/python/topi/cpp/generic.py
rename to python/tvm/topi/cpp/generic.py
index e6bf250..d314eca 100644 (file)
@@ -17,4 +17,4 @@
 """FFI for generic TOPI ops and schedules"""
 import tvm._ffi
 
-tvm._ffi._init_api("topi.generic", "topi.cpp.generic")
+tvm._ffi._init_api("topi.generic", "tvm.topi.cpp.generic")
diff --git a/python/tvm/topi/cpp/impl.py b/python/tvm/topi/cpp/impl.py
new file mode 100644 (file)
index 0000000..2c877c3
--- /dev/null
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""Load Lib for C++ TOPI ops and schedules"""
+import tvm._ffi
+
+tvm._ffi._init_api("topi", "tvm.topi.cpp")
similarity index 94%
rename from topi/python/topi/cpp/nn.py
rename to python/tvm/topi/cpp/nn.py
index d11aa27..0e3cee7 100644 (file)
@@ -17,4 +17,4 @@
 """FFI for NN TOPI ops and schedules"""
 import tvm._ffi
 
-tvm._ffi._init_api("topi.nn", "topi.cpp.nn")
+tvm._ffi._init_api("topi.nn", "tvm.topi.cpp.nn")
similarity index 94%
rename from topi/python/topi/cpp/rocm.py
rename to python/tvm/topi/cpp/rocm.py
index c001a61..eab5110 100644 (file)
@@ -17,4 +17,4 @@
 """FFI for Rocm TOPI ops and schedules"""
 import tvm._ffi
 
-tvm._ffi._init_api("topi.rocm", "topi.cpp.rocm")
+tvm._ffi._init_api("topi.rocm", "tvm.topi.cpp.rocm")
similarity index 94%
rename from topi/python/topi/cpp/util.py
rename to python/tvm/topi/cpp/util.py
index cc76dd9..ca0b86e 100644 (file)
@@ -17,4 +17,4 @@
 """FFI for TOPI utility functions"""
 import tvm._ffi
 
-tvm._ffi._init_api("topi.util", "topi.cpp.util")
+tvm._ffi._init_api("topi.util", "tvm.topi.cpp.util")
similarity index 93%
rename from topi/python/topi/cpp/vision/__init__.py
rename to python/tvm/topi/cpp/vision/__init__.py
index 6034e27..000602f 100644 (file)
@@ -20,4 +20,4 @@ import tvm._ffi
 
 from . import yolo
 
-tvm._ffi._init_api("topi.vision", "topi.cpp.vision")
+tvm._ffi._init_api("topi.vision", "tvm.topi.cpp.vision")
similarity index 92%
rename from topi/python/topi/cpp/vision/yolo.py
rename to python/tvm/topi/cpp/vision/yolo.py
index ff12498..17e2327 100644 (file)
@@ -17,4 +17,4 @@
 """FFI for Yolo TOPI ops and schedules"""
 import tvm._ffi
 
-tvm._ffi._init_api("topi.vision.yolo", "topi.cpp.vision.yolo")
+tvm._ffi._init_api("topi.vision.yolo", "tvm.topi.cpp.vision.yolo")
similarity index 94%
rename from topi/python/topi/cpp/x86.py
rename to python/tvm/topi/cpp/x86.py
index 0681ffe..0034af0 100644 (file)
@@ -17,4 +17,4 @@
 """FFI for x86 TOPI ops and schedules"""
 import tvm._ffi
 
-tvm._ffi._init_api("topi.x86", "topi.cpp.x86")
+tvm._ffi._init_api("topi.x86", "tvm.topi.cpp.x86")
similarity index 99%
rename from topi/python/topi/cuda/ssd/multibox.py
rename to python/tvm/topi/cuda/ssd/multibox.py
index 22d7443..541af06 100644 (file)
@@ -21,7 +21,7 @@ import tvm
 from tvm import te
 from tvm.tir import if_then_else, exp
 
-import topi
+from tvm import topi
 
 from ..nms import non_max_suppression
 
similarity index 99%
rename from topi/python/topi/image/dilation2d.py
rename to python/tvm/topi/image/dilation2d.py
index 074ca6c..dd16a21 100644 (file)
@@ -19,7 +19,7 @@
 """Dilation2D operators"""
 from __future__ import absolute_import as _abs
 from tvm import te
-from topi.util import simplify
+from tvm.topi.util import simplify
 from ..nn.pad import pad
 from ..nn.util import get_pad_tuple
 
similarity index 99%
rename from topi/python/topi/image/resize.py
rename to python/tvm/topi/image/resize.py
index d901bab..d6c0845 100644 (file)
@@ -19,7 +19,7 @@
 from __future__ import absolute_import
 import tvm
 from tvm import te
-from topi.util import nchw_pack_layout, nchw_xc_layout
+from tvm.topi.util import nchw_pack_layout, nchw_xc_layout
 from .. import tag
 
 def get_2d_indices(indices, layout='NCHW'):
@@ -664,6 +664,7 @@ def crop_and_resize(data, boxes, box_indices, crop_size, layout="NCHW",
 def resize3d(data, size, layout="NCDHW", method="nearest_neighbor",
              coordinate_transformation_mode="align_corners", out_dtype=None):
     """Perform resize operation on the data.
+
     Parameters
     ----------
     data: tvm.te.Tensor
@@ -683,6 +684,7 @@ def resize3d(data, size, layout="NCDHW", method="nearest_neighbor",
         Method to be used for resizing.
     out_dtype: string, optional
         Type to return. If left None will be same as input type.
+
     Returns
     -------
     output : tvm.te.Tensor
similarity index 98%
rename from topi/python/topi/nn/bitserial_dense.py
rename to python/tvm/topi/nn/bitserial_dense.py
index 10635d8..97d1fb2 100644 (file)
@@ -19,7 +19,7 @@
 from __future__ import absolute_import
 import tvm
 from tvm import te
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 from .bitserial_util import bitpack
 
 def bitserial_dense(data, weight, data_bits, weight_bits, pack_dtype='uint32',
similarity index 95%
rename from topi/python/topi/nn/bitserial_util.py
rename to python/tvm/topi/nn/bitserial_util.py
index a25aa91..2b320b8 100644 (file)
 import numpy as np
 import tvm
 from tvm import te
-from topi.transform import concatenate
+from tvm.topi.transform import concatenate
 from ..util import get_const_int
 
 def bitpack(data, bits, pack_axis, bit_axis, pack_type, name="QuantizeInput"):
     """Packs data into format necessary for bitserial computation
+
+    Parameters
+    ----------
     pack_axis : int
        index of the axis to pack in data
     bit_axis : int
-       index of axis to place bit axis in resulting packed data"""
+       index of axis to place bit axis in resulting packed data
+    """
     ishape = data.shape
     n = len(ishape)
     if pack_type == 'uint8':
similarity index 99%
rename from topi/python/topi/nn/conv2d.py
rename to python/tvm/topi/nn/conv2d.py
index 51de454..d3be6bb 100644 (file)
@@ -673,13 +673,15 @@ def conv2d_winograd_weight_transform(kernel, tile_size):
 
 def conv2d_winograd_nnpack_weight_transform(kernel, convolution_algorithm, out_dtype):
     """Weight transformation for winograd
-     Parameters
+
+    Parameters
     ----------
     kernel: Tensor
         The raw kernel tensor with layout "NCHW". Only 3x3 kernels are supported for now.
     convolution_algorithm: int
         The convolution algorithm for Winograd NNPACK.
-     Returns
+
+    Returns
     -------
     output : tvm.te.Tensor
         4-D with shape [alpha, alpha, CO, CI]
@@ -771,8 +773,8 @@ def group_conv2d_nchw(Input, Filter, stride, padding, dilation, groups, out_dtyp
 def unpack_NCHWc_to_nchw(packed_out, out_dtype):
     """Unpack conv2d_NCHWc output from layout NCHWc to NCHW
 
-     Parameters
-    -----------
+    Parameters
+    ----------
     packed_out : tvm.te.Tensor
         The output tensor of conv2d_NCHWc.
 
similarity index 95%
rename from topi/python/topi/nn/elemwise.py
rename to python/tvm/topi/nn/elemwise.py
index 1315a48..e851c64 100644 (file)
@@ -63,12 +63,14 @@ def leaky_relu(x, alpha):
 
 @tvm.te.tag_scope(tag=tag.BROADCAST)
 def prelu(x, slope, axis=1):
-    """ PReLU.
+    """PReLU.
     It accepts two arguments: an input ``x`` and a weight array ``W``
     and computes the output as :math:`PReLU(x) = x > 0 ? x : W * x`,
     where :math:`*` is an elementwise multiplication for each sample in the
     batch.
-    Arguments:
+
+    Parameters
+    ----------
     x : tvm.te.Tensor
         Input argument.
 
@@ -78,12 +80,14 @@ def prelu(x, slope, axis=1):
     axis : int
         The axis where the channel data needs to be applied
 
-    Returns:
+    Returns
+    -------
     y : tvm.te.Tensor
         The result.
 
-    Links:
-        [http://arxiv.org/pdf/1502.01852v1.pdf]
+    Links
+    -----
+    [http://arxiv.org/pdf/1502.01852v1.pdf]
     """
 
     assert len(slope.shape) == 1
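
As a reference for the formula above, an illustrative numpy sketch (not the TOPI implementation), with `slope` aligned to `axis` as the docstring describes:

    import numpy as np

    def prelu_ref(x, slope, axis=1):
        # y = x if x > 0 else W * x, with W broadcast along `axis`
        shape = [1] * x.ndim
        shape[axis] = slope.shape[0]
        return np.where(x > 0, x, slope.reshape(shape) * x)

    x = np.random.randn(2, 3, 4, 4).astype("float32")
    y = prelu_ref(x, np.array([0.1, 0.2, 0.3], dtype="float32"))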
similarity index 99%
rename from topi/python/topi/nn/upsampling.py
rename to python/tvm/topi/nn/upsampling.py
index 008e52e..96a13ef 100644 (file)
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 """TVM operator upsampling compute."""
-import topi
+from tvm import topi
 from tvm import te
 from ..util import simplify
 
@@ -18,7 +18,7 @@
 """Bilinear Scale in python"""
 import math
 import numpy as np
-from topi.util import nchw_pack_layout
+from tvm.topi.util import nchw_pack_layout
 
 def bilinear_resize_python(image, out_size, layout, coordinate_transformation_mode="align_corners"):
     """ Bilinear scaling using python"""
similarity index 99%
rename from topi/python/topi/testing/common.py
rename to python/tvm/topi/testing/common.py
index 7bc5c5d..721493e 100644 (file)
@@ -18,7 +18,7 @@
 """Common utility for topi test"""
 
 import tvm
-import topi
+from tvm import topi
 
 _injective_schedule = {
     "generic": topi.generic.schedule_injective,
similarity index 98%
rename from topi/python/topi/testing/conv1d_ncw_python.py
rename to python/tvm/topi/testing/conv1d_ncw_python.py
index 90ee7de..84a463f 100644 (file)
@@ -17,7 +17,7 @@
 # pylint: disable=unused-variable, invalid-name
 """1D convolution in python"""
 import numpy as np
-from topi.nn.util import get_pad_tuple1d
+from tvm.topi.nn.util import get_pad_tuple1d
 
 
 def dilate_np(x, dilation):
@@ -18,8 +18,8 @@
 """Transposed 1D convolution in python"""
 import numpy as np
 import scipy
-import topi
-from topi.nn.util import get_pad_tuple1d
+import tvm.topi.testing
+from tvm.topi.nn.util import get_pad_tuple1d
 
 def conv1d_transpose_ncw_python(a_np, w_np, stride, padding, output_padding):
     """Transposed 1D convolution operator in NCW layout.
@@ -60,7 +60,7 @@ def conv1d_transpose_ncw_python(a_np, w_np, stride, padding, output_padding):
     assert opad < stride_w
     fpad_left, fpad_right = get_pad_tuple1d(padding, filter_w)
     # dilate stage
-    dilated_a_np = topi.testing.dilate_python(a_np, [1, 1, stride_w])
+    dilated_a_np = tvm.topi.testing.dilate_python(a_np, [1, 1, stride_w])
     # padding stage
     bpad_left = filter_w - 1 - fpad_left
     bpad_right = filter_w - 1 - fpad_right + opad
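
The "dilate stage" above inserts stride - 1 zeros between neighboring input elements before the padding stage; a small sketch of dilate_python under the new import path:

    import numpy as np
    import tvm.topi.testing

    a = np.array([[[1., 2., 3.]]])                    # NCW, W = 3
    d = tvm.topi.testing.dilate_python(a, [1, 1, 2])  # stride 2 along W
    # d == [[[1., 0., 2., 0., 3.]]]; W becomes (3 - 1) * 2 + 1 = 5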
@@ -18,7 +18,7 @@
 """Convolution in python"""
 import numpy as np
 import scipy.signal
-from topi.nn.util import get_pad_tuple
+from tvm.topi.nn.util import get_pad_tuple
 
 
 def conv2d_hwcn_python(a_np, w_np, stride, padding):
@@ -18,7 +18,7 @@
 """Convolution in python"""
 import numpy as np
 import scipy.signal
-from topi.nn.util import get_pad_tuple
+from tvm.topi.nn.util import get_pad_tuple
 
 
 def _conv2d_nchw_python(a_np, w_np, stride, padding):
@@ -18,7 +18,7 @@
 """Convolution in python"""
 import numpy as np
 import scipy.signal
-from topi.nn.util import get_pad_tuple
+from tvm.topi.nn.util import get_pad_tuple
 
 
 def _conv2d_nhwc_python(a_np, w_np, stride, padding):
@@ -18,8 +18,8 @@
 """Transposed convolution in python"""
 import numpy as np
 import scipy
-import topi
-from topi.nn.util import get_pad_tuple
+import tvm.topi.testing
+from tvm.topi.nn.util import get_pad_tuple
 
 
 def conv2d_transpose_nchw_python(a_np, w_np, stride, padding, output_padding):
@@ -59,7 +59,7 @@ def conv2d_transpose_nchw_python(a_np, w_np, stride, padding, output_padding):
         opad_h, opad_w = output_padding
     assert opad_h < stride_h and opad_w < stride_w
     # dilate stage
-    dilated_a_np = topi.testing.dilate_python(a_np, [1, 1, stride_h, stride_w])
+    dilated_a_np = tvm.topi.testing.dilate_python(a_np, [1, 1, stride_h, stride_w])
     # padding stage
     fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w))
     bpad_top = filter_h - 1 - fpad_top
@@ -18,7 +18,7 @@
 """Convolution 3D in python"""
 import numpy as np
 import scipy.signal
-from topi.nn.util import get_pad_tuple3d
+from tvm.topi.nn.util import get_pad_tuple3d
 
 
 def _conv3d_ncdhw_python(a_np, w_np, stride, padding):
@@ -18,7 +18,7 @@
 """Convolution 3D in python"""
 import numpy as np
 import scipy.signal
-from topi.nn.util import get_pad_tuple3d
+from tvm.topi.nn.util import get_pad_tuple3d
 
 
 def conv3d_ndhwc_python(a_np, w_np, stride, padding):
@@ -17,8 +17,8 @@
 # pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals, too-many-branches
 """Convolution 3D transpose in python"""
 import numpy as np
-import topi
-from topi.nn.util import get_pad_tuple3d
+import tvm.topi.testing
+from tvm.topi.nn.util import get_pad_tuple3d
 
 
 def conv3d_transpose_ncdhw_python(a_np, w_np, stride, padding):
@@ -51,7 +51,7 @@ def conv3d_transpose_ncdhw_python(a_np, w_np, stride, padding):
         stride_d, stride_h, stride_w = stride
 
     # dilate stage
-    dilated_a_np = topi.testing.dilate_python(a_np, [1, 1, stride_d, stride_h, stride_w])
+    dilated_a_np = tvm.topi.testing.dilate_python(a_np, [1, 1, stride_d, stride_h, stride_w])
 
     # padding stage
     fpad_front, fpad_top, fpad_left, fpad_back, fpad_bottom, fpad_right = get_pad_tuple3d(
@@ -81,6 +81,6 @@ def conv3d_transpose_ncdhw_python(a_np, w_np, stride, padding):
     out_w = (in_w - 1) * stride_w - fpad_left - fpad_right + filter_w
 
     w_np = np.flip(w_np, axis=[2, 3, 4]).transpose((1, 0, 2, 3, 4))
-    b_np = topi.testing.conv3d_ncdhw_python(padded_a_np, w_np, stride=(1, 1, 1), padding=(0, 0, 0))
+    b_np = tvm.topi.testing.conv3d_ncdhw_python(padded_a_np, w_np, stride=(1, 1, 1), padding=(0, 0, 0))
 
     return b_np
@@ -18,7 +18,7 @@
 """Deformable convolution in python"""
 import itertools
 import numpy as np
-from topi.nn.util import get_pad_tuple
+from tvm.topi.nn.util import get_pad_tuple
 
 def deformable_conv2d_nchw_python(a_np, offset_np, w_np, stride, padding, dilation,
                                   deformable_groups, groups):
similarity index 99%
rename from topi/python/topi/testing/upsampling_python.py
rename to python/tvm/topi/testing/upsampling_python.py
index f2fa80f..8cc00ad 100644 (file)
@@ -18,7 +18,7 @@
 """Upsampling in python"""
 import math
 import numpy as np
-from topi.util import nchw_pack_layout
+from tvm.topi.util import nchw_pack_layout
 
 
 def upsample_nearest(arr, scale):
similarity index 97%
rename from topi/python/topi/transform.py
rename to python/tvm/topi/transform.py
index 159412f..6bfa473 100644 (file)
@@ -19,7 +19,7 @@
 from __future__ import absolute_import as _abs
 import tvm
 from tvm import te
-import topi
+from tvm import topi
 from . import cpp
 from . import tag
 from .util import within_index, make_idx
@@ -48,19 +48,22 @@ def expand_like(a, shape_like, axis):
     This operation can always be composed of unsqueezing and
     expanding dims on those unsqueezed axes.
 
-    Examples::
-    input = [ 12.  19.  27.]
-    input.shape = (3,)
+    Examples
+    --------
+    .. code-block::
 
-    new_shape_array = [[[1,2],[2,3],[1,3]],
-                      [[1,4],[4,3],[5,2]],
-                      [[7,1],[7,2],[7,3]]]
-    new_shape_array.shape = (3, 3, 2)
+        input = [ 12.  19.  27.]
+        input.shape = (3,)
 
-    expand_like(input, [1,2], new_shape_array) =
-                      [[[12,12],[12,12],[12,12]],
-                      [[19,19],[19,19],[19,19]],
-                      [[27,27],[27,27],[27,27]]]
+        new_shape_array = [[[1,2],[2,3],[1,3]],
+                        [[1,4],[4,3],[5,2]],
+                        [[7,1],[7,2],[7,3]]]
+        new_shape_array.shape = (3, 3, 2)
+
+        expand_like(input, new_shape_array, [1,2]) =
+                        [[[12,12],[12,12],[12,12]],
+                        [[19,19],[19,19],[19,19]],
+                        [[27,27],[27,27],[27,27]]]
 
     Parameters
     ----------
@@ -70,6 +73,7 @@ def expand_like(a, shape_like, axis):
         The tensor with the target shape.
     axis: list of int
         axis to be expanded on
+
     Returns
     -------
     ret : tvm.te.Tensor
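
The same example expressed with TE tensors, following the signature and the corrected call order above (a sketch):

    from tvm import te, topi

    a = te.placeholder((3,), name="a")
    shape_like = te.placeholder((3, 3, 2), name="shape_like")
    out = topi.expand_like(a, shape_like, [1, 2])
    # out.shape == (3, 3, 2): `a` is unsqueezed on axes 1 and 2, then broadcast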
@@ -458,7 +462,7 @@ def gather_nd(a, indices):
 def matmul(a, b, transp_a=False, transp_b=False):
     """
     Creates an operation that calculates a matrix multiplication (row-major notation):
-        A(i, k) * B(k, j)
+    A(i, k) * B(k, j)
     when both transp_a and transp_b are False, and the usual transposed combinations otherwise
 
     Parameters
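
A short sketch of the convention (this Python matmul is a thin wrapper over the C++ implementation, as the `from . import cpp` in this file suggests; the transp_* flags choose which axis of each operand is contracted):

    from tvm import te, topi

    A = te.placeholder((4, 8), name="A")
    B = te.placeholder((8, 16), name="B")
    C = topi.matmul(A, B)                   # C(i, j) = sum_k A(i, k) * B(k, j)

    Bt = te.placeholder((16, 8), name="Bt")
    Ct = topi.matmul(A, Bt, transp_b=True)  # contracts A(i, k) with Bt(j, k)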
similarity index 99%
rename from topi/python/topi/util.py
rename to python/tvm/topi/util.py
index cc43732..5bde1cb 100644 (file)
@@ -365,8 +365,8 @@ def get_shape(src_shape, src_layout, dst_layout):
 def within_index(b, e, s, i):
     """Return a boolean value that indicates if i is within the given index.
 
-    Parameter
-    ---------
+    Parameters
+    ----------
     b : Expr
       beginning of the index
 
@@ -400,8 +400,8 @@ def make_idx(b, e, s, z, i):
     The returned value is only meaningful if within_index() returns True
     for the same set of parameters.
 
-    Parameter
-    ---------
+    Parameters
+    ----------
     b : Expr
       beginning of the index
 
similarity index 99%
rename from topi/python/topi/vision/ssd/multibox.py
rename to python/tvm/topi/vision/ssd/multibox.py
index e5b9215..6534503 100644 (file)
@@ -21,7 +21,7 @@ import tvm
 from tvm.te import hybrid
 from tvm.tir import exp, sqrt
 
-import topi
+from tvm import topi
 
 from ..nms import non_max_suppression
 
similarity index 99%
rename from topi/python/topi/x86/bitserial_dense.py
rename to python/tvm/topi/x86/bitserial_dense.py
index cbc6ac8..8d5736b 100644 (file)
@@ -20,7 +20,7 @@ from __future__ import absolute_import as _abs
 import tvm
 from tvm import te
 from tvm import autotvm
-from topi.util import get_const_int, get_const_tuple
+from tvm.topi.util import get_const_int, get_const_tuple
 from .. import tag
 from ..nn.bitserial_util import bitpack, binary_op_multiplier
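
`get_const_tuple`, imported here from its new location, converts a TE shape (an Array of IntImm nodes) into a plain Python tuple of ints; a one-line sketch:

    from tvm import te
    from tvm.topi.util import get_const_tuple

    A = te.placeholder((2, 3), name="A")
    assert get_const_tuple(A.shape) == (2, 3)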
 
index 5209fac..7d8e867 100644 (file)
@@ -34,13 +34,7 @@ fn test_load_graph() {
     let output = std::process::Command::new(mf_dir!("/tests/build_model.py"))
         .env(
             "PYTHONPATH",
-            concat!(
-                mf_dir!("/../../python"),
-                ":",
-                mf_dir!("/../../nnvm/python"),
-                ":",
-                mf_dir!("/../../topi/python")
-            ),
+            concat!(mf_dir!("/../../python"), ":", mf_dir!("/../../nnvm/python")),
         )
         .output()
         .expect("Failed to build test model");
index 3c4faf7..9ee5727 100644 (file)
@@ -23,7 +23,6 @@
  */
 #include "compile_engine.h"
 
-#include <topi/tags.h>
 #include <tvm/driver/driver_api.h>
 #include <tvm/ir/type_functor.h>
 #include <tvm/relay/analysis.h>
@@ -37,6 +36,7 @@
 #include <tvm/te/operation.h>
 #include <tvm/te/schedule.h>
 #include <tvm/te/schedule_pass.h>
+#include <tvm/topi/tags.h>
 
 #include <functional>
 #include <limits>
index 6be9b0d..d3eb4f9 100644 (file)
  * \brief Registration of annotation operators.
  */
 
-#include <topi/elemwise.h>
 #include <tvm/relay/attrs/annotation.h>
 #include <tvm/relay/expr.h>
 #include <tvm/relay/op.h>
 #include <tvm/relay/op_attr_types.h>
 #include <tvm/tir/expr.h>
+#include <tvm/topi/elemwise.h>
 
 #include "../../transforms/infer_layout_util.h"
 #include "../type_relations.h"
index 56b7d44..4b5e7d9 100644 (file)
  * \brief Property def of nn operators.
  */
 
-#include <topi/elemwise.h>
 #include <tvm/relay/attrs/debug.h>
 #include <tvm/relay/op.h>
 #include <tvm/tir/data_layout.h>
+#include <tvm/topi/elemwise.h>
 
 #include <vector>
 
index 007b3dd..2bb87ac 100644 (file)
  */
 #include "transform.h"
 
-#include <topi/broadcast.h>
-#include <topi/transform.h>
 #include <tvm/relay/attrs/transform.h>
 #include <tvm/relay/op.h>
 #include <tvm/relay/op_attr_types.h>
 #include <tvm/runtime/registry.h>
+#include <tvm/topi/broadcast.h>
+#include <tvm/topi/transform.h>
 
 #include <utility>
 #include <vector>
@@ -89,7 +89,7 @@ TVM_REGISTER_GLOBAL("relay.op.dyn._make.reshape").set_body_typed(MakeReshape);
 
 RELAY_REGISTER_OP("dyn.reshape")
     .describe(R"code(Reshapes the input array based on the values in the newshape array.
-    
+
     To give the user more convenience without manual shape inference,
     some dimensions of the shape can take special values from the set {0, -1, -3}.
     The significance of each is explained below:
@@ -120,7 +120,7 @@ RELAY_REGISTER_OP("dyn.reshape")
             data.shape = (2,3,4,5), newshape = (-3,-3), result.shape = (6,20)
             data.shape = (2,3,4), newshape = (0,-3), result.shape = (2,12)
 
-    Special values -2 and -4 from the standard reshape op would introduce dynamic rank 
+    Special values -2 and -4 from the standard reshape op would introduce dynamic rank
     in this op. Thus, they are not permitted.
 
     )code" TVM_ADD_FILELINE)
index de73b44..7710245 100644 (file)
  * \brief Operators for manifest shape-aware memory allocation in Relay.
  */
 
-#include <topi/elemwise.h>
 #include <tvm/relay/attrs/memory.h>
 #include <tvm/relay/expr.h>
 #include <tvm/relay/op.h>
 #include <tvm/relay/op_attr_types.h>
 #include <tvm/runtime/data_type.h>
+#include <tvm/topi/elemwise.h>
 
 #include "../../transforms/infer_layout_util.h"
 #include "../op_common.h"
index 67f42b7..5970cc7 100644 (file)
  * \file correlation.cc
  * \brief Correlation operators
  */
-#include <topi/nn.h>
 #include <tvm/relay/attrs/nn.h>
 #include <tvm/relay/op.h>
 #include <tvm/tir/data_layout.h>
 #include <tvm/tir/op.h>
+#include <tvm/topi/nn.h>
 
 #include <vector>
 
index 7013c02..19348c0 100644 (file)
 
 #include "nn.h"
 
-#include <topi/nn.h>
-#include <topi/nn/bias_add.h>
-#include <topi/nn/flatten.h>
-#include <topi/nn/softmax.h>
 #include <tvm/relay/attrs/image.h>
 #include <tvm/relay/attrs/nn.h>
 #include <tvm/relay/op.h>
 #include <tvm/tir/data_layout.h>
+#include <tvm/topi/nn.h>
+#include <tvm/topi/nn/bias_add.h>
+#include <tvm/topi/nn/flatten.h>
+#include <tvm/topi/nn/softmax.h>
 
 #include <string>
 #include <vector>
index 52259c5..d710360 100644 (file)
  * \file pad.cc
  * \brief Implementation of operator pad
  */
-#include <topi/nn.h>
 #include <tvm/relay/attrs/nn.h>
 #include <tvm/relay/op.h>
 #include <tvm/tir/data_layout.h>
 #include <tvm/tir/op.h>
+#include <tvm/topi/nn.h>
 
 #include <vector>
 
index 63f0ce5..1e53060 100644 (file)
  */
 #include "pooling.h"
 
-#include <topi/nn/pooling.h>
 #include <tvm/relay/attrs/nn.h>
 #include <tvm/relay/op.h>
 #include <tvm/relay/op_attr_types.h>
 #include <tvm/tir/data_layout.h>
+#include <tvm/topi/nn/pooling.h>
 
 #include <vector>
 
index 93bce15..df128ff 100644 (file)
@@ -21,9 +21,9 @@
  * \file binary.cc
  * \brief binary broadcast operators.
  */
-#include <topi/broadcast.h>
 #include <tvm/relay/expr.h>
 #include <tvm/relay/op.h>
+#include <tvm/topi/broadcast.h>
 
 #include "../op_common.h"
 #include "../type_relations.h"
index 6ffcc67..9fd1400 100644 (file)
  * \file reduce.cc
  * \brief Reduction operators.
  */
-#include <topi/elemwise.h>
-#include <topi/reduction.h>
 #include <tvm/relay/attrs/reduce.h>
 #include <tvm/relay/expr.h>
 #include <tvm/relay/op.h>
+#include <tvm/topi/elemwise.h>
+#include <tvm/topi/reduction.h>
 
 #include <limits>
 #include <numeric>
index 99a1f59..9427ded 100644 (file)
  */
 #include "transform.h"
 
-#include <topi/broadcast.h>
-#include <topi/elemwise.h>
-#include <topi/nn.h>
-#include <topi/reduction.h>
-#include <topi/transform.h>
 #include <tvm/ir/error.h>
 #include <tvm/relay/attrs/transform.h>
 #include <tvm/relay/op.h>
 #include <tvm/tir/data_layout.h>
 #include <tvm/tir/expr.h>
 #include <tvm/tir/op.h>
+#include <tvm/topi/broadcast.h>
+#include <tvm/topi/elemwise.h>
+#include <tvm/topi/nn.h>
+#include <tvm/topi/reduction.h>
+#include <tvm/topi/transform.h>
 
 #include <vector>
 
index 5809798..938142f 100644 (file)
  * \file unary.cc
  * \brief Unary operators.
  */
-#include <topi/elemwise.h>
-#include <topi/transform.h>
 #include <tvm/relay/attrs/transform.h>
 #include <tvm/relay/expr.h>
 #include <tvm/relay/op.h>
+#include <tvm/topi/elemwise.h>
+#include <tvm/topi/transform.h>
 
 #include "../make_op.h"
 #include "../op_common.h"
index e54473f..cfd8113 100644 (file)
@@ -21,9 +21,9 @@
  * \file yolo.cc
  * \brief Yolo related operators
  */
-#include <topi/vision/reorg.h>
 #include <tvm/relay/attrs/vision.h>
 #include <tvm/relay/op.h>
+#include <tvm/topi/vision/reorg.h>
 
 #include <vector>
 
index 6e611d6..59756ea 100644 (file)
  * \brief Dialect operators for Relay VM.
  */
 
-#include <topi/elemwise.h>
 #include <tvm/relay/attrs/memory.h>
 #include <tvm/relay/attrs/vm.h>
 #include <tvm/relay/expr.h>
 #include <tvm/relay/op.h>
 #include <tvm/relay/op_attr_types.h>
 #include <tvm/runtime/data_type.h>
+#include <tvm/topi/elemwise.h>
 
 #include "../../transforms/infer_layout_util.h"
 #include "../op_common.h"
index 772213d..8b7c428 100644 (file)
  *        (3) and sum them together to get the adjoint of the input itself.
  *        The three steps are computed recursively.
  */
-#include <topi/elemwise.h>
-#include <topi/transform.h>
 #include <tvm/runtime/registry.h>
 #include <tvm/te/autodiff.h>
 #include <tvm/tir/stmt_functor.h>
+#include <tvm/topi/elemwise.h>
+#include <tvm/topi/transform.h>
 
 #include <memory>
 #include <vector>
similarity index 97%
rename from topi/src/broadcast.cc
rename to src/topi/broadcast.cc
index e13c09e..a06d914 100644 (file)
  * \brief Registration of broadcast operators
  * \file broadcast.cc
  */
-#include <topi/broadcast.h>
-#include <topi/util.h>
 #include <tvm/runtime/packed_func.h>
 #include <tvm/runtime/registry.h>
+#include <tvm/topi/broadcast.h>
+#include <tvm/topi/util.h>
 
+namespace tvm {
 namespace topi {
 
 using namespace tvm;
@@ -76,3 +77,4 @@ TVM_REGISTER_GLOBAL("topi.broadcast_to").set_body([](TVMArgs args, TVMRetValue*
 });
 
 }  // namespace topi
+}  // namespace tvm
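
The same pattern repeats for every file under src/topi below: the body moves into the tvm::topi namespace, but the TVM_REGISTER_GLOBAL keys (here "topi.broadcast_to") are plain strings and do not change, so FFI lookups from Python are unaffected. A quick sketch of that invariant:

    import tvm

    # Same global name before and after the namespace move:
    f = tvm.get_global_func("topi.broadcast_to", allow_missing=True)
    assert f is not None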
similarity index 98%
rename from topi/src/elemwise.cc
rename to src/topi/elemwise.cc
index 10ac8f8..7a39758 100644 (file)
  * \brief Registration of elemwise operators
  * \file elemwise.cc
  */
-#include <topi/elemwise.h>
 #include <tvm/runtime/packed_func.h>
 #include <tvm/runtime/registry.h>
+#include <tvm/topi/elemwise.h>
 
+namespace tvm {
 namespace topi {
 
 using namespace tvm;
@@ -155,3 +156,4 @@ TVM_REGISTER_GLOBAL("topi.bitwise_not").set_body([](TVMArgs args, TVMRetValue* r
 });
 
 }  // namespace topi
+}  // namespace tvm
similarity index 92%
rename from topi/src/nn.cc
rename to src/topi/nn.cc
index 3ec4778..4a209b2 100644 (file)
  * \brief Registration of NN operators
  * \file nn.cc
  */
-#include <topi/nn.h>
-#include <topi/nn/batch_matmul.h>
-#include <topi/nn/bias_add.h>
-#include <topi/nn/bnn.h>
-#include <topi/nn/dense.h>
-#include <topi/nn/dilate.h>
-#include <topi/nn/flatten.h>
-#include <topi/nn/local_response_norm.h>
-#include <topi/nn/mapping.h>
-#include <topi/nn/pooling.h>
-#include <topi/nn/softmax.h>
 #include <tvm/runtime/packed_func.h>
 #include <tvm/runtime/registry.h>
-
+#include <tvm/topi/nn.h>
+#include <tvm/topi/nn/batch_matmul.h>
+#include <tvm/topi/nn/bias_add.h>
+#include <tvm/topi/nn/bnn.h>
+#include <tvm/topi/nn/dense.h>
+#include <tvm/topi/nn/dilate.h>
+#include <tvm/topi/nn/flatten.h>
+#include <tvm/topi/nn/local_response_norm.h>
+#include <tvm/topi/nn/mapping.h>
+#include <tvm/topi/nn/pooling.h>
+#include <tvm/topi/nn/softmax.h>
+
+namespace tvm {
 namespace topi {
 
 using namespace tvm;
@@ -151,3 +152,4 @@ TVM_REGISTER_GLOBAL("topi.nn.binary_dense").set_body([](TVMArgs args, TVMRetValu
 });
 
 }  // namespace topi
+}  // namespace tvm
similarity index 95%
rename from topi/src/reduction.cc
rename to src/topi/reduction.cc
index b981495..b5c6690 100644 (file)
  * \brief Registration of reduction operators
  * \file reduction.cc
  */
-#include <topi/reduction.h>
-#include <topi/util.h>
 #include <tvm/runtime/packed_func.h>
 #include <tvm/runtime/registry.h>
+#include <tvm/topi/reduction.h>
+#include <tvm/topi/util.h>
 
+namespace tvm {
 namespace topi {
 
 using namespace tvm;
@@ -64,3 +65,4 @@ TVM_REGISTER_GLOBAL("topi.any").set_body([](TVMArgs args, TVMRetValue* rv) {
 });
 
 }  // namespace topi
+}  // namespace tvm
similarity index 94%
rename from topi/src/schedule.cc
rename to src/topi/schedule.cc
index b974aca..333833a 100644 (file)
  */
 #define TOPI_REDUCE_ATLEAST1D 0
 
-#include <topi/cuda/dense.h>
-#include <topi/cuda/injective.h>
-#include <topi/cuda/normalization.h>
-#include <topi/cuda/pooling.h>
-#include <topi/cuda/reduction.h>
-#include <topi/cuda/softmax.h>
-#include <topi/detail/tensor_utils.h>
-#include <topi/generic/default.h>
-#include <topi/generic/extern.h>
-#include <topi/generic/injective.h>
-#include <topi/rocm/dense.h>
-#include <topi/rocm/injective.h>
-#include <topi/rocm/normalization.h>
-#include <topi/rocm/pooling.h>
-#include <topi/rocm/reduction.h>
-#include <topi/rocm/softmax.h>
-#include <topi/x86/bnn.h>
-#include <topi/x86/default.h>
-#include <topi/x86/injective.h>
 #include <tvm/ir/expr.h>
 #include <tvm/runtime/module.h>
 #include <tvm/runtime/packed_func.h>
 #include <tvm/runtime/registry.h>
 #include <tvm/target/generic_func.h>
-
+#include <tvm/topi/cuda/dense.h>
+#include <tvm/topi/cuda/injective.h>
+#include <tvm/topi/cuda/normalization.h>
+#include <tvm/topi/cuda/pooling.h>
+#include <tvm/topi/cuda/reduction.h>
+#include <tvm/topi/cuda/softmax.h>
+#include <tvm/topi/detail/tensor_utils.h>
+#include <tvm/topi/generic/default.h>
+#include <tvm/topi/generic/extern.h>
+#include <tvm/topi/generic/injective.h>
+#include <tvm/topi/rocm/dense.h>
+#include <tvm/topi/rocm/injective.h>
+#include <tvm/topi/rocm/normalization.h>
+#include <tvm/topi/rocm/pooling.h>
+#include <tvm/topi/rocm/reduction.h>
+#include <tvm/topi/rocm/softmax.h>
+#include <tvm/topi/x86/bnn.h>
+#include <tvm/topi/x86/default.h>
+#include <tvm/topi/x86/injective.h>
+
+namespace tvm {
 namespace topi {
 
 using namespace tvm;
@@ -319,3 +320,4 @@ TVM_REGISTER_GENERIC_FUNC(dense)
     .register_func({"rocm"}, WrapDenseOp(topi::rocm::dense_rocm));
 
 }  // namespace topi
+}  // namespace tvm
similarity index 98%
rename from topi/src/transform.cc
rename to src/topi/transform.cc
index ab39a5e..7a76c60 100644 (file)
  * \brief Registration of transform operators
  * \file transform.cc
  */
-#include <topi/transform.h>
-#include <topi/util.h>
 #include <tvm/runtime/packed_func.h>
 #include <tvm/runtime/registry.h>
+#include <tvm/topi/transform.h>
+#include <tvm/topi/util.h>
 
+namespace tvm {
 namespace topi {
 
 using namespace tvm;
@@ -176,3 +177,4 @@ TVM_REGISTER_GLOBAL("topi.one_hot").set_body([](TVMArgs args, TVMRetValue* rv) {
 });
 
 }  // namespace topi
+}  // namespace tvm
similarity index 94%
rename from topi/src/vision.cc
rename to src/topi/vision.cc
index 0485177..e64eec4 100644 (file)
  * \brief Registration of vision operators
  * \file vision.cc
  */
-#include <topi/vision/reorg.h>
 #include <tvm/runtime/packed_func.h>
 #include <tvm/runtime/registry.h>
+#include <tvm/topi/vision/reorg.h>
 
+namespace tvm {
 namespace topi {
 
 using namespace tvm;
@@ -35,3 +36,4 @@ TVM_REGISTER_GLOBAL("topi.vision.reorg").set_body([](TVMArgs args, TVMRetValue*
 });
 
 }  // namespace topi
+}  // namespace tvm
index 8526605..b2de0ef 100644 (file)
 
 #include <dmlc/logging.h>
 #include <gtest/gtest.h>
-#include <topi/nn.h>
 #include <tvm/auto_scheduler/compute_dag.h>
 #include <tvm/runtime/container.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/nn.h>
 
 #include <unordered_set>
 
index 206470f..2462fd1 100644 (file)
 
 #include <dmlc/logging.h>
 #include <gtest/gtest.h>
-#include <topi/cuda/injective.h>
 #include <tvm/driver/driver_api.h>
 #include <tvm/runtime/registry.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/cuda/injective.h>
 
 #include <cmath>
 #include <string>
index 636593f..5de4ada 100644 (file)
@@ -18,8 +18,6 @@
  */
 
 #include <gtest/gtest.h>
-#include <topi/broadcast.h>
-#include <topi/generic/injective.h>
 #include <tvm/driver/driver_api.h>
 #include <tvm/ir/module.h>
 #include <tvm/relay/analysis.h>
@@ -32,6 +30,8 @@
 #include <tvm/runtime/packed_func.h>
 #include <tvm/runtime/registry.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/broadcast.h>
+#include <tvm/topi/generic/injective.h>
 
 using namespace tvm;
 using namespace tvm::relay;
index bb4bf92..1a12aec 100644 (file)
@@ -18,8 +18,6 @@
  */
 
 #include <gtest/gtest.h>
-#include <topi/broadcast.h>
-#include <topi/generic/injective.h>
 #include <tvm/driver/driver_api.h>
 #include <tvm/ir/module.h>
 #include <tvm/node/structural_equal.h>
@@ -32,6 +30,8 @@
 #include <tvm/runtime/packed_func.h>
 #include <tvm/runtime/registry.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/broadcast.h>
+#include <tvm/topi/generic/injective.h>
 
 using namespace tvm;
 
index 10c7b9d..22ef8c7 100644 (file)
  */
 
 #include <gtest/gtest.h>
-#include <topi/elemwise.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/elemwise.h>
 
+namespace tvm {
 namespace topi {
 TEST(Tensor, Basic) {
   using namespace tvm;
@@ -29,6 +30,7 @@ TEST(Tensor, Basic) {
   auto C = topi::exp(A);
 }
 }  // namespace topi
+}  // namespace tvm
 
 int main(int argc, char** argv) {
   testing::InitGoogleTest(&argc, argv);
index 70709b0..6f95774 100644 (file)
@@ -32,7 +32,6 @@
 #include <gtest/gtest.h>
 #include <spawn.h>
 #include <sys/wait.h>
-#include <topi/generic/injective.h>
 #include <tvm/driver/driver_api.h>
 #include <tvm/relay/analysis.h>
 #include <tvm/relay/expr.h>
@@ -43,6 +42,7 @@
 #include <tvm/runtime/packed_func.h>
 #include <tvm/runtime/registry.h>
 #include <tvm/te/operation.h>
+#include <tvm/topi/generic/injective.h>
 
 TVM_REGISTER_GLOBAL("test.sch").set_body([](tvm::TVMArgs args, tvm::TVMRetValue* rv) {
   *rv = topi::generic::schedule_injective(args[0], args[1]);
index 54f4ff6..00ddcd3 100644 (file)
@@ -18,7 +18,7 @@ import pytest
 import tvm
 from tvm import te
 import numpy as np
-import topi.testing
+import tvm.topi.testing
 from tvm.contrib import cblas
 
 def verify_matmul_add(m, l, n, transa=False, transb=False, dtype="float32"):
@@ -131,7 +131,7 @@ def verify_batch_matmul(batch, m, l, n, transa=False, transb=False, iterative=Fa
             a = a.transpose(0, 2, 1)
         if not transb:
             b = b.transpose(0, 2, 1)
-        return topi.testing.batch_matmul(a, b)
+        return tvm.topi.testing.batch_matmul(a, b)
 
     def verify(target="llvm"):
         if not tvm.runtime.enabled(target):
index 17cb0d1..61822c8 100644 (file)
@@ -19,7 +19,7 @@ from tvm import te
 from tvm.contrib import cudnn
 from tvm.contrib.nvcc import have_fp16
 import numpy as np
-import topi.testing
+import tvm.topi.testing
 
 def verify_conv2d(data_dtype, conv_dtype, tensor_format=0, groups=1):
     in_channel = 4
@@ -79,10 +79,10 @@ def verify_conv2d(data_dtype, conv_dtype, tensor_format=0, groups=1):
     w = tvm.nd.array(w_np, ctx)
     y = tvm.nd.array(y_np, ctx)
     if tensor_format == 0:
-        c_np = topi.testing.conv2d_nchw_python(x_np, w_np, 1, 1, groups=groups)
+        c_np = tvm.topi.testing.conv2d_nchw_python(x_np, w_np, 1, 1, groups=groups)
     elif tensor_format == 1:
         wt = w_np.transpose((1, 2, 3, 0))  #OHWI => HWIO
-        c_np = topi.testing.conv2d_nhwc_python(x_np, wt, 1, 1, groups=groups)
+        c_np = tvm.topi.testing.conv2d_nhwc_python(x_np, wt, 1, 1, groups=groups)
 
     f(x, w, y)
     tvm.testing.assert_allclose(y.asnumpy(), c_np, atol=1e-2, rtol=1e-2)
@@ -154,7 +154,7 @@ def verify_conv3d(data_dtype, conv_dtype, tensor_format=0, groups=1):
     w = tvm.nd.array(w_np, ctx)
     y = tvm.nd.array(y_np, ctx)
     if tensor_format == 0:
-        c_np = topi.testing.conv3d_ncdhw_python(x_np, w_np, 1, 1, groups)
+        c_np = tvm.topi.testing.conv3d_ncdhw_python(x_np, w_np, 1, 1, groups)
     else:
         raise AssertionError("For now, conv3d tensor format only support: 0(NCHW)")
 
@@ -172,7 +172,7 @@ def verify_softmax(shape, axis, dtype="float32"):
 
     ctx = tvm.gpu(0)
     a_np = np.random.uniform(size=shape).astype(dtype)
-    b_np = topi.testing.softmax_python(a_np)
+    b_np = tvm.topi.testing.softmax_python(a_np)
     a = tvm.nd.array(a_np, ctx)
     b = tvm.nd.array(b_np, ctx)
     f = tvm.build(s, [A, B], "cuda", target_host="llvm", name="softmax")
@@ -187,7 +187,7 @@ def verify_softmax_4d(shape, dtype="float32"):
     ctx = tvm.gpu(0)
     n, c, h, w = shape
     a_np = np.random.uniform(size=shape).astype(dtype)
-    b_np = topi.testing.softmax_python(a_np.transpose(0, 2, 3, 1).reshape(h*w, c))
+    b_np = tvm.topi.testing.softmax_python(a_np.transpose(0, 2, 3, 1).reshape(h*w, c))
     b_np = b_np.reshape(n, h, w, c).transpose(0, 3, 1, 2)
     a = tvm.nd.array(a_np, ctx)
     b = tvm.nd.array(b_np, ctx)
index 1fd5974..9ae2c9f 100644 (file)
@@ -18,7 +18,7 @@
 import tvm
 from tvm import te
 import numpy as np
-from topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int16
+from tvm.topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int16
 
 
 def benchmark_fc_int8_acc16():
index f723ccb..37101a8 100644 (file)
@@ -19,8 +19,8 @@
 import tvm
 from tvm import te
 import numpy as np
-from topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32_cascadelake
-from topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32
+from tvm.topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32_cascadelake
+from tvm.topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32
 import pytest
 
 
index ed671e0..deffbe9 100644 (file)
@@ -55,7 +55,7 @@ def test_conv2d():
                               data_type=1)
 
     yshape = [x.value for x in Y.shape]
-    import topi
+    from tvm import topi
     s = te.create_schedule(Y.op)
 
     def verify():
index 37c1644..230e8db 100644 (file)
@@ -22,7 +22,7 @@ def mxnet_check():
     Users can run this script directly to verify correctness.
     """
     import mxnet as mx
-    import topi
+    from tvm import topi
     import tvm
     from tvm import te
     import numpy as np
index 505199a..81fcb12 100644 (file)
@@ -18,7 +18,7 @@ import tvm
 from tvm import te
 import numpy as np
 import scipy.signal
-from topi.nn.util import get_pad_tuple
+from tvm.topi.nn.util import get_pad_tuple
 from tvm.contrib import nnpack
 import pytest
 
index 58ff064..c6c480e 100644 (file)
@@ -17,7 +17,7 @@
 from tvm import te
 import numpy as np
 import re
-import topi
+from tvm import topi
 
 
 def findany(pattern, str):
index 179f5b4..6f058f4 100644 (file)
@@ -22,11 +22,11 @@ from coremltools.models import datatypes
 import tvm
 from tvm import te
 from tvm.contrib import graph_runtime
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm import relay
 from tvm.relay.testing.config import ctx_list
-from topi.testing import conv2d_nchw_python
+from tvm.topi.testing import conv2d_nchw_python
 
 import coremltools as cm
 import model_zoo
@@ -186,11 +186,11 @@ def verify_UpsampleLayerParams(input_dim, scale, mode):
 
     a_np = np.full(input_dim, 1, dtype=dtype)
     if mode == 'NN':
-        b_np = topi.testing.upsampling_python(a_np, (scale, scale))
+        b_np = tvm.topi.testing.upsampling_python(a_np, (scale, scale))
     else:
         new_h = input_dim[2] * scale
         new_w = input_dim[3] * scale
-        b_np = topi.testing.bilinear_resize_python(a_np, (new_h, new_w), 'NCHW')
+        b_np = tvm.topi.testing.bilinear_resize_python(a_np, (new_h, new_w), 'NCHW')
 
     input = [('input', datatypes.Array(*input_dim))]
     output = [('output', datatypes.Array(*b_np.shape))]
@@ -215,7 +215,7 @@ def verify_l2_normalize(input_dim, eps):
     dtype = "float32"
 
     a_np = np.random.uniform(size=input_dim).astype(dtype)
-    b_np = topi.testing.l2_normalize_python(a_np, eps, 1)
+    b_np = tvm.topi.testing.l2_normalize_python(a_np, eps, 1)
 
     input = [('input', datatypes.Array(*input_dim))]
     output = [('output', datatypes.Array(*b_np.shape))]
@@ -234,7 +234,7 @@ def verify_lrn(input_dim, size, bias, alpha, beta):
     dtype = "float32"
     axis=1
     a_np = np.random.uniform(size=input_dim).astype(dtype)
-    b_np = topi.testing.lrn_python(a_np, size, axis, bias, alpha, beta)
+    b_np = tvm.topi.testing.lrn_python(a_np, size, axis, bias, alpha, beta)
 
     input = [('input', datatypes.Array(*input_dim))]
     output = [('output', datatypes.Array(*b_np.shape))]
index 8654bf0..56ea96d 100644 (file)
@@ -20,8 +20,8 @@ import onnx
 from onnx import helper, TensorProto, mapping
 import torch
 import torchvision
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 import tvm
 from tvm import te
 from tvm import relay
@@ -615,7 +615,7 @@ def test_isnan():
 def verify_gather_nd(in_shape, indices, dtype):
     x = np.random.uniform(size=in_shape).astype(dtype)
     indices = np.array(indices, dtype="int32")
-    out_np = topi.testing.gather_nd_python(x, indices)
+    out_np = tvm.topi.testing.gather_nd_python(x, indices)
 
     y = helper.make_node("GatherND", ['in', 'indices'], ['out'])
 
@@ -823,7 +823,7 @@ def _test_upsample_nearest():
                          'out'], mode='nearest', scales=[1.0, 1.0, 2.0, 2.0])
 
     in_array = np.random.uniform(size=in_shape).astype(np.float32)
-    out_array = topi.testing.upsampling_python(
+    out_array = tvm.topi.testing.upsampling_python(
         in_array, (scale, scale), "NCHW")
 
     graph = helper.make_graph([y],
@@ -848,7 +848,7 @@ def _test_upsample3d_nearest():
                          'out'], mode='nearest', scales=[1.0, 1.0, 2.0, 2.0, 2.0])
 
     in_array = np.random.uniform(size=in_shape).astype(np.float32)
-    out_array = topi.testing.upsampling3d_python(
+    out_array = tvm.topi.testing.upsampling3d_python(
         in_array, (scale, scale, scale), "NCDHW")
 
     graph = helper.make_graph([y],
@@ -872,7 +872,7 @@ def _test_upsample_bilinear():
                          'out'], mode='linear', scales=[1.0, 1.0, 2.0, 2.0])
 
     in_array = np.random.uniform(size=in_shape).astype(np.float32)
-    out_array = topi.testing.bilinear_resize_python(
+    out_array = tvm.topi.testing.bilinear_resize_python(
         in_array, (3*scale, 3*scale), "NCHW")
 
     graph = helper.make_graph([y],
@@ -896,7 +896,7 @@ def _test_upsample_bilinear_opset9():
     y = helper.make_node("Upsample", ['in', 'scales'], ['out'], mode='linear')
     scales = [1, 1, 2, 2]
     in_array = np.random.uniform(size=in_shape).astype(np.float32)
-    out_array = topi.testing.bilinear_resize_python(
+    out_array = tvm.topi.testing.bilinear_resize_python(
         in_array, (3*scale, 3*scale), "NCHW")
 
     ref_node = helper.make_node('Constant',
@@ -931,7 +931,7 @@ def _test_upsample3d_trilinear():
     y = helper.make_node("Upsample", ['in', 'scales'], ['out'], mode='linear')
     scales = [1.0, 1.0, 2.0, 2.0, 2.0]
     in_array = np.random.uniform(size=in_shape).astype(np.float32)
-    out_array = topi.testing.trilinear_resize3d_python(
+    out_array = tvm.topi.testing.trilinear_resize3d_python(
         in_array, (3*scale, 3*scale, 3*scale), "NCDHW", coordinate_transformation_mode="half_pixel")
 
     ref_array = np.array(scales)
@@ -968,7 +968,7 @@ def _test_softmax(inshape, axis):
     opname = 'Softmax'
     indata = np.random.uniform(size=inshape).astype(np.float32)
     outshape = inshape
-    outdata = topi.testing.softmax_python(indata)
+    outdata = tvm.topi.testing.softmax_python(indata)
     if isinstance(axis, int):
         y = helper.make_node(opname, ['in'], ['out'], axis=axis)
     elif axis is None:
@@ -1705,7 +1705,7 @@ def test_Scale():
 
 def test_LogSoftmax():
     _test_onnx_op_elementwise((1, 4),
-                              topi.testing.log_softmax_python,
+                              tvm.topi.testing.log_softmax_python,
                               {},
                               'float32',
                               'LogSoftmax',
index c974496..994a047 100644 (file)
@@ -21,9 +21,9 @@ from tvm import autotvm
 from tvm.autotvm.task.space import FallbackConfigEntity
 from tvm.contrib import nnpack
 from tvm.contrib.pickle_memoize import memoize
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
 from pytest import skip
 
 
@@ -47,8 +47,8 @@ def verify_conv2d_nchw(batch, in_channel, in_size, num_filter, kernel, stride, p
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
-        c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
         if add_bias:
             b_np = np.random.uniform(size=bias_shape).astype(dtype)
             c_np += b_np
index f9d9c93..a84020d 100644 (file)
@@ -22,7 +22,7 @@ from tvm import te
 from tvm import relay
 from tvm.relay.loops import while_loop
 from tvm.relay.testing import run_infer_type as infer_type
-import topi.testing
+import tvm.topi.testing
 
 def int32(val):
     return relay.const(val, 'int32')
@@ -652,7 +652,7 @@ def verify_any_strided_slice(data_shape, begin_shape, end_shape, strides_shape,
     np_end = np.random.randint(5, 10, size=end_shape, dtype="int32")
     np_strides = np.random.randint(1, 2 if slice_mode == "size" else 3, size=strides_shape, dtype="int32")
     # target numpy result
-    ref_res = topi.testing.strided_slice_python(np_data, np_begin, np_end, np_strides, slice_mode)
+    ref_res = tvm.topi.testing.strided_slice_python(np_data, np_begin, np_end, np_strides, slice_mode)
 
     # Relay Module
     mod = tvm.IRModule()
@@ -827,7 +827,7 @@ def verify_any_crop_and_resize(data_shape, boxes_shape, box_indices_shape, crop_
     mod["main"] = relay.Function([data, boxes, box_indices], y)
     data_np = np.random.uniform(size=data_shape).astype(dtype)
     boxes_np = np.random.uniform(size=static_boxes).astype(dtype)
-    box_indices_np = np.random.uniform(size=static_box_indices_shape).astype(indices_dtype)    
+    box_indices_np = np.random.uniform(size=static_box_indices_shape).astype(indices_dtype)
     for kind in ["debug", "vm"]:
         ex = relay.create_executor(kind, mod=mod, ctx=tvm.cpu(), target="llvm")
         result = ex.evaluate()(data_np, boxes_np, box_indices_np)
@@ -835,8 +835,8 @@ def verify_any_crop_and_resize(data_shape, boxes_shape, box_indices_shape, crop_
 
 def test_any_crop_and_resize():
     verify_any_crop_and_resize(
-        data_shape=(1, 234, 234, 256), 
-        boxes_shape=(relay.Any(), 4), 
+        data_shape=(1, 234, 234, 256),
+        boxes_shape=(relay.Any(), 4),
         box_indices_shape=(relay.Any(),),
         crop_size=(14, 14),
         layout='NHWC',
@@ -844,8 +844,8 @@ def test_any_crop_and_resize():
         static_box_indices_shape=(128,),
         ref_out_shape=(128, 14, 14, 256))
     verify_any_crop_and_resize(
-        data_shape=(1, 256, 234, 234), 
-        boxes_shape=(relay.Any(), 4), 
+        data_shape=(1, 256, 234, 234),
+        boxes_shape=(relay.Any(), 4),
         box_indices_shape=(relay.Any(),),
         crop_size=(14, 14),
         layout='NCHW',
index 1b4e08f..6bc170d 100644 (file)
@@ -20,7 +20,7 @@ from tvm import te
 import tvm.testing
 from tvm import relay
 from tvm import autotvm
-import topi
+from tvm import topi
 from tvm.relay.testing import run_infer_type
 from tvm.relay.testing.temp_op_attr import TempOpAttr
 
index a771d29..cb95955 100644 (file)
@@ -19,7 +19,7 @@ import scipy
 from scipy import special
 import tvm
 import tvm.relay as relay
-import topi
+from tvm import topi
 from tvm import te
 from tvm.contrib import graph_runtime
 
index 7985836..8b434d6 100644 (file)
@@ -16,8 +16,8 @@
 # under the License.
 import numpy as np
 
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 import tvm
 from tvm import te
 from tvm import relay
@@ -38,9 +38,10 @@ def verify_max_pool2d_grad(x_shape, pool_size, strides, padding, ceil_mode):
     ph, pw = padding
     y_shape = topi.util.get_const_tuple(fwd_func.ret_type.shape)
     out_grad = np.ones(shape=y_shape)
-    ref_grad = topi.testing.pool_grad_nchw(data, out_grad, pool_size=pool_size, strides=strides,
-                                           padding=[ph, pw, ph, pw],
-                                           pool_type='max', ceil_mode=ceil_mode)
+    ref_grad = tvm.topi.testing.pool_grad_nchw(
+        data, out_grad, pool_size=pool_size, strides=strides,
+        padding=[ph, pw, ph, pw],
+        pool_type='max', ceil_mode=ceil_mode)
 
     for target, ctx in ctx_list():
         intrp = relay.create_executor(ctx=ctx, target=target)
@@ -66,9 +67,10 @@ def verify_avg_pool2d_grad(x_shape, pool_size, strides, padding, ceil_mode, coun
     ph, pw = padding
     y_shape = topi.util.get_const_tuple(fwd_func.ret_type.shape)
     out_grad = np.ones(shape=y_shape)
-    ref_grad = topi.testing.pool_grad_nchw(data, out_grad, pool_size=pool_size, strides=strides,
-                                           padding=[ph, pw, ph, pw],
-                                           pool_type='avg', ceil_mode=ceil_mode)
+    ref_grad = tvm.topi.testing.pool_grad_nchw(
+        data, out_grad, pool_size=pool_size, strides=strides,
+        padding=[ph, pw, ph, pw],
+        pool_type='avg', ceil_mode=ceil_mode)
 
     for target, ctx in ctx_list():
         intrp = relay.create_executor(ctx=ctx, target=target)
@@ -93,9 +95,10 @@ def verify_global_avg_pool2d_grad(x_shape):
     data = np.random.rand(*x_shape).astype("float32")
     y_shape = topi.util.get_const_tuple(fwd_func.ret_type.shape)
     out_grad = np.ones(shape=y_shape)
-    ref_grad = topi.testing.pool_grad_nchw(data, out_grad, pool_size=(x_shape[2], x_shape[3]),
-                                            strides=(1, 1), padding=[0, 0, 0, 0], pool_type='avg',
-                                            ceil_mode=False)
+    ref_grad = tvm.topi.testing.pool_grad_nchw(
+        data, out_grad, pool_size=(x_shape[2], x_shape[3]),
+        strides=(1, 1), padding=[0, 0, 0, 0], pool_type='avg',
+        ceil_mode=False)
 
     for target, ctx in ctx_list():
         intrp = relay.create_executor(ctx=ctx, target=target)
index c58ff0f..4616a14 100644 (file)
@@ -22,7 +22,7 @@ import scipy
 from tvm import relay
 from tvm.relay import transform
 from tvm.relay.testing import ctx_list, run_infer_type
-import topi.testing
+import tvm.topi.testing
 from tvm.contrib.nvcc import have_fp16
 
 
@@ -196,7 +196,7 @@ def test_softmax():
         assert yy.checked_type == relay.TensorType(shape, dtype)
         func = relay.Function([x], y)
         x_data = np.random.uniform(size=shape).astype(dtype)
-        ref_res = topi.testing.softmax_python(x_data)
+        ref_res = tvm.topi.testing.softmax_python(x_data)
         for target, ctx in ctx_list():
             intrp = relay.create_executor("graph", ctx=ctx, target=target)
             op_res = intrp.evaluate(func)(x_data)
@@ -216,7 +216,7 @@ def test_log_softmax():
         assert yy.checked_type == relay.TensorType(shape, dtype)
         func = relay.Function([x], y)
         x_data = np.random.uniform(size=shape).astype(dtype)
-        ref_res = topi.testing.log_softmax_python(x_data)
+        ref_res = tvm.topi.testing.log_softmax_python(x_data)
         for target, ctx in ctx_list():
             intrp = relay.create_executor("graph", ctx=ctx, target=target)
             op_res = intrp.evaluate(func)(x_data)
index a79f1a5..f65407a 100644 (file)
 import numpy as np
 import tvm
 from tvm import te
-import topi.testing
+import tvm.topi.testing
 from tvm import relay
 from tvm.relay import transform
 from tvm.relay.testing import ctx_list, run_infer_type
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 
 
 def test_checkpoint():
@@ -213,7 +213,7 @@ def test_broadcast_to_like():
     x = relay.Var("x", relay.ty.TensorType(shape , dtype))
     y = relay.Var("y", relay.ty.TensorType(shape_like, dtype))
     z = relay.broadcast_to_like(x, y)
-    
+
     zz = run_infer_type(z)
     assert zz.checked_type == relay.ty.TensorType(shape_like, dtype)
 
@@ -221,7 +221,7 @@ def test_broadcast_to_like():
     x = np.random.uniform(size=shape).astype(dtype)
     y = np.random.uniform(size=shape_like).astype(dtype)
     ref_res = np.broadcast_to(x, shape_like)
-    
+
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, ctx=ctx, target=target)
@@ -318,7 +318,7 @@ def verify_batch_matmul(x_shape, y_shape, out_shape, dtype="float32"):
     func = relay.Function([x, y], z)
     x_np = np.random.uniform(size=x_shape).astype(dtype)
     y_np = np.random.uniform(size=y_shape).astype(dtype)
-    z_np = topi.testing.batch_matmul(x_np, y_np)
+    z_np = tvm.topi.testing.batch_matmul(x_np, y_np)
 
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
@@ -378,7 +378,7 @@ def verify_adaptive_pool(dshape, out_size, pool_type, layout, dtype, opfunc):
     func = relay.Function([x], y)
 
     np_data = np.random.uniform(low=0, high=255, size=dshape).astype(dtype)
-    np_out = topi.testing.adaptive_pool(np_data, out_size, pool_type, layout)
+    np_out = tvm.topi.testing.adaptive_pool(np_data, out_size, pool_type, layout)
 
     for target, ctx in ctx_list():
         intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
@@ -421,7 +421,7 @@ def test_sequence_mask():
         func = relay.Function([data, valid_length], out)
         data_np = np.random.uniform(size=data_shape).astype(dtype)
         valid_length_np = np.random.randint(0, max_length, size=nbatch).astype(itype)
-        gt_out_np = topi.testing.sequence_mask(data_np, valid_length_np, mask_value, axis)
+        gt_out_np = tvm.topi.testing.sequence_mask(data_np, valid_length_np, mask_value, axis)
 
         for target, ctx in ctx_list():
             for kind in ["graph", "debug"]:
@@ -456,7 +456,7 @@ def test_one_hot():
         assert checked.checked_type == relay.ty.TensorType(_get_oshape(indices_shape, depth, axis), dtype)
         func = relay.Function([indices], out)
         indices_np = np.random.randint(0, depth, size=indices_shape).astype("int32")
-        out_np = topi.testing.one_hot(indices_np, on_value, off_value, depth, axis, dtype)
+        out_np = tvm.topi.testing.one_hot(indices_np, on_value, off_value, depth, axis, dtype)
 
         for target, ctx in ctx_list():
             for kind in ["graph", "debug"]:
index cd54d9f..b26d6e4 100644 (file)
@@ -24,8 +24,8 @@ from tvm import relay
 from tvm.relay import transform
 from tvm.relay.testing import ctx_list, run_infer_type
 from tvm.contrib import util
-import topi.testing
-from topi.cuda.conv3d_winograd import _infer_tile_size
+import tvm.topi.testing
+from tvm.topi.cuda.conv3d_winograd import _infer_tile_size
 
 
 def test_conv1d_infer_type():
@@ -97,7 +97,7 @@ def test_conv1d_run():
         func = relay.Function([x, w], y)
         data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
         kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
-        ref_res = topi.testing.conv1d_ncw_python(
+        ref_res = tvm.topi.testing.conv1d_ncw_python(
             data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, dilation)
 
         for target, ctx in ctx_list():
@@ -210,9 +210,9 @@ def test_conv2d_run():
         func = relay.Function([x, w], y)
         data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
         kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
-        dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)
+        dkernel = tvm.topi.testing.dilate_python(kernel, (1, 1) + dilation)
         if fref is None:
-            ref_res = topi.testing.conv2d_nchw_python(
+            ref_res = tvm.topi.testing.conv2d_nchw_python(
                 data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding,
                 groups=groups)
         else:
@@ -271,7 +271,7 @@ def test_conv2d_run():
     kshape = (32, 1, 3, 3)
     run_test_conv2d("float32", "float32", 1, dshape, kshape,
                     padding=(1, 1), channels=32, groups=32, kernel_size=(3 ,3),
-                    fref=lambda x, w: topi.testing.depthwise_conv2d_python_nchw(
+                    fref=lambda x, w: tvm.topi.testing.depthwise_conv2d_python_nchw(
                         x, w, (1, 1), "SAME"))
 
     # depthwise conv2d for arm_cpu
@@ -352,7 +352,7 @@ def test_conv2d_winograd():
 
         data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
         kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
-        ref_res = topi.testing.conv2d_nchw_python(
+        ref_res = tvm.topi.testing.conv2d_nchw_python(
             data.astype(out_dtype), kernel.astype(out_dtype), 1, padding,
             groups=groups)
 
@@ -456,9 +456,9 @@ def test_conv3d_run():
         func = relay.Function([x, w], y)
         data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
         kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
-        dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)
+        dkernel = tvm.topi.testing.dilate_python(kernel, (1, 1) + dilation)
         if fref is None:
-            ref_res = topi.testing.conv3d_ncdhw_python(
+            ref_res = tvm.topi.testing.conv3d_ncdhw_python(
                 data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding,
                 groups=groups)
         else:
@@ -501,9 +501,9 @@ def test_conv3d_ndhwc_run():
         func = relay.Function([x, w], y)
         data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
         kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
-        dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)
+        dkernel = tvm.topi.testing.dilate_python(kernel, (1, 1) + dilation)
         if fref is None:
-            ref_res = topi.testing.conv3d_ndhwc_python(
+            ref_res = tvm.topi.testing.conv3d_ndhwc_python(
                 data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding)
         else:
             ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype))
@@ -574,7 +574,7 @@ def test_conv3d_winograd():
 
         data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
         kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
-        ref_res = topi.testing.conv3d_ncdhw_python(
+        ref_res = tvm.topi.testing.conv3d_ncdhw_python(
             data.astype(out_dtype), kernel.astype(out_dtype), 1, padding,
             groups=groups)
 
@@ -664,7 +664,7 @@ def test_conv3d_transpose_ncdhw_run():
     data = np.random.uniform(size=dshape).astype(dtype)
     kernel = np.random.uniform(size=kshape).astype(dtype)
 
-    ref_res = topi.testing.conv3d_transpose_ncdhw_python(data, kernel, 1, 1)
+    ref_res = tvm.topi.testing.conv3d_transpose_ncdhw_python(data, kernel, 1, 1)
 
     for target, ctx in ctx_list():
         intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
@@ -714,7 +714,7 @@ def test_conv2d_transpose_nchw_run():
     dtype = "float32"
     data = np.random.uniform(size=dshape).astype(dtype)
     kernel = np.random.uniform(size=kshape).astype(dtype)
-    ref_res = topi.testing.conv2d_transpose_nchw_python(
+    ref_res = tvm.topi.testing.conv2d_transpose_nchw_python(
         data, kernel, 2, 1, (1, 1))
 
     for target, ctx in ctx_list():
@@ -741,7 +741,7 @@ def test_conv2d_transpose_nhwc_run():
     kernel = np.random.uniform(size=kshape_hwoi).astype(dtype)
     # use true kshape layout here - HWOI
 
-    ref_res = topi.testing.conv2d_transpose_nhwc_python(data, kernel, 'HWOI',
+    ref_res = tvm.topi.testing.conv2d_transpose_nhwc_python(data, kernel, 'HWOI',
                                                         2, 1, output_padding=(1, 1))
 
     for target, ctx in ctx_list():
@@ -763,7 +763,7 @@ def test_conv1d_transpose_ncw_run():
     dtype = "float32"
     data = np.random.uniform(size=dshape).astype(dtype)
     kernel = np.random.uniform(size=kshape).astype(dtype)
-    ref_res = topi.testing.conv1d_transpose_ncw_python(
+    ref_res = tvm.topi.testing.conv1d_transpose_ncw_python(
         data, kernel, 2, 1, output_padding=(1,))
 
     for target, ctx in ctx_list():
@@ -900,7 +900,7 @@ def test_pool1d():
         y = opfunc(x, pool_size=pool_size, strides=strides, padding=padding)
         func = relay.Function([x], y)
         data = np.random.uniform(size=dshape).astype(dtype)
-        ref_res = topi.testing.pool1d_ncw_python(data, (2,), (2,),
+        ref_res = tvm.topi.testing.pool1d_ncw_python(data, (2,), (2,),
                                                  (0, 0), (1, 3, 16), pool_type, False)
         for target, ctx in ctx_list():
             intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
@@ -938,7 +938,7 @@ def test_pool3d():
         assert out_shape == f_out_shape, \
             "Output shape mismatch. expected {}, actual {}".format(out_shape, f_out_shape)
         data = np.random.uniform(size=dshape).astype(dtype)
-        ref_res = topi.testing.pool3d_ncdhw_python(data, pool_size, strides,
+        ref_res = tvm.topi.testing.pool3d_ncdhw_python(data, pool_size, strides,
                                                    padding, out_shape, pool_type, False)
         for target, ctx in ctx_list():
             intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
@@ -1080,7 +1080,7 @@ def test_lrn():
     assert yy.checked_type == relay.TensorType(shape, dtype)
     func = relay.Function([x], z)
     x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
-    ref_res = topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta)
+    ref_res = tvm.topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta)
 
     for target, ctx in ctx_list():
         intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
@@ -1108,7 +1108,7 @@ def test_l2_normalize():
     assert yy.checked_type == relay.TensorType(shape, dtype)
     func = relay.Function([x], z)
     x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
-    ref_res = topi.testing.l2_normalize_python(x_data, eps, axis)
+    ref_res = tvm.topi.testing.l2_normalize_python(x_data, eps, axis)
 
     for target, ctx in ctx_list():
         intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
@@ -1163,9 +1163,9 @@ def _test_upsampling(layout, method, align_corners=False):
     func = relay.Function([x], y)
     data = np.random.uniform(size=dshape).astype(dtype)
     if method == "nearest_neighbor":
-        ref = topi.testing.upsampling_python(data, (scale_h, scale_w), layout)
+        ref = tvm.topi.testing.upsampling_python(data, (scale_h, scale_w), layout)
     else:
-        ref = topi.testing.bilinear_resize_python(data, (int(round(h*scale_h)),
+        ref = tvm.topi.testing.bilinear_resize_python(data, (int(round(h*scale_h)),
                                                   int(round(w*scale_w))), layout)
     for target, ctx in ctx_list():
         executor = relay.create_executor("graph", ctx=ctx, target=target)
@@ -1208,9 +1208,9 @@ def _test_upsampling3d(layout, method, coordinate_transformation_mode="half_pixe
     func = relay.Function([x], y)
     data = np.random.uniform(size=dshape).astype(dtype)
     if method == "nearest_neighbor":
-        ref = topi.testing.upsampling3d_python(data, (scale_d, scale_h, scale_w), layout)
+        ref = tvm.topi.testing.upsampling3d_python(data, (scale_d, scale_h, scale_w), layout)
     else:
-        ref = topi.testing.trilinear_resize3d_python(data, (int(round(d*scale_d)),\
+        ref = tvm.topi.testing.trilinear_resize3d_python(data, (int(round(d*scale_d)),\
                                                      int(round(h*scale_h)),\
                                                      int(round(w*scale_w))), layout)
     for target, ctx in ctx_list():
@@ -1421,7 +1421,7 @@ def test_correlation():
         func = relay.Function([data1, data2], y)
         data1_np = np.random.uniform(size=data_shape).astype(dtype)
         data2_np = np.random.uniform(size=data_shape).astype(dtype)
-        ref_res = topi.testing.correlation_nchw_python(data1_np, data2_np, kernel_size, max_displacement, stride1, stride2, padding, is_multiply)
+        ref_res = tvm.topi.testing.correlation_nchw_python(data1_np, data2_np, kernel_size, max_displacement, stride1, stride2, padding, is_multiply)
 
         for target, ctx in ctx_list():
             intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
index 74231cb..c800b1c 100644 (file)
@@ -20,7 +20,7 @@ import numpy as np
 from tvm import relay
 from tvm.relay import transform
 from tvm.relay.testing import ctx_list, run_infer_type
-import topi.testing
+import tvm.topi.testing
 
 
 def test_binary_op():
@@ -305,7 +305,7 @@ def test_strided_slice():
 
         # target numpy result
         x_data = np.random.uniform(size=dshape).astype("float32")
-        ref_res = topi.testing.strided_slice_python(
+        ref_res = tvm.topi.testing.strided_slice_python(
             x_data, begin, end, strides, slice_mode)
 
         if attr_const:
@@ -380,7 +380,7 @@ def test_strided_set():
             return
         x_data = np.random.uniform(size=dshape).astype("float32")
         v_data = np.random.uniform(size=vshape).astype("float32")
-        ref_res = topi.testing.strided_set_python(
+        ref_res = tvm.topi.testing.strided_set_python(
             x_data, v_data, begin, end, strides)
         for target, ctx in ctx_list():
             intrp = relay.create_executor("graph", ctx=ctx, target=target)
index 3a94fc6..7067803 100644 (file)
@@ -23,7 +23,7 @@ from tvm import te
 from tvm import relay
 from tvm.relay import transform
 from tvm.relay.testing import ctx_list, run_infer_type
-import topi.testing
+import tvm.topi.testing
 
 
 def test_resize_infer_type():
@@ -49,9 +49,9 @@ def test_resize():
 
         x_data = np.random.uniform(size=dshape).astype("float32")
         if method == "bilinear":
-            ref_res = topi.testing.bilinear_resize_python(x_data, size, layout)
+            ref_res = tvm.topi.testing.bilinear_resize_python(x_data, size, layout)
         else:
-            ref_res = topi.testing.upsampling_python(x_data, (scale, scale), layout)
+            ref_res = tvm.topi.testing.upsampling_python(x_data, (scale, scale), layout)
         x = relay.var("x", relay.TensorType(dshape, "float32"))
         z = relay.image.resize(x, size, layout, method, "align_corners")
         assert "size=" in z.astext()
@@ -91,9 +91,9 @@ def test_resize3d():
 
         x_data = np.random.uniform(size=dshape).astype("float32")
         if method == "trilinear":
-            ref_res = topi.testing.trilinear_resize3d_python(x_data, size, layout)
+            ref_res = tvm.topi.testing.trilinear_resize3d_python(x_data, size, layout)
         else:
-            ref_res = topi.testing.upsampling3d_python(x_data, (scale, scale, scale), layout)
+            ref_res = tvm.topi.testing.upsampling3d_python(x_data, (scale, scale, scale), layout)
         x = relay.var("x", relay.TensorType(dshape, "float32"))
         z = relay.image.resize3d(x, size, layout, method, "align_corners")
         assert "size=" in z.astext()
@@ -116,7 +116,7 @@ def test_crop_and_resize():
 
         image_data = np.random.uniform(size=img_shape).astype("float32")
 
-        ref_res = topi.testing.crop_and_resize_python(image_data,
+        ref_res = tvm.topi.testing.crop_and_resize_python(image_data,
                                                       boxes,
                                                       box_indices,
                                                       crop_size,
@@ -463,7 +463,7 @@ def test_roi_align():
         np_data = np.random.uniform(size=data_shape).astype("float32")
         np_rois = np.random.uniform(size=rois_shape).astype('float32') * in_size
         np_rois[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi)
-        ref_res = topi.testing.roi_align_nchw_python(np_data, np_rois, pooled_size=pooled_size,
+        ref_res = tvm.topi.testing.roi_align_nchw_python(np_data, np_rois, pooled_size=pooled_size,
                                                      spatial_scale=spatial_scale,
                                                      sample_ratio=sample_ratio)
         for target, ctx in ctx_list():
@@ -495,7 +495,7 @@ def test_roi_pool():
         np_data = np.random.uniform(size=data_shape).astype("float32")
         np_rois = np.random.uniform(size=rois_shape).astype('float32') * in_size
         np_rois[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi).astype('float32')
-        ref_res = topi.testing.roi_pool_nchw_python(np_data, np_rois, pooled_size=pooled_size,
+        ref_res = tvm.topi.testing.roi_pool_nchw_python(np_data, np_rois, pooled_size=pooled_size,
                                                      spatial_scale=spatial_scale)
         for target, ctx in ctx_list():
             intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
@@ -590,7 +590,7 @@ def test_yolo_reorg_infer_shape():
 def test_yolo_reorg():
     def verify_yolo_reorg(shape, stride):
         x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
-        ref_res = topi.testing.reorg_python(x_data, stride)
+        ref_res = tvm.topi.testing.reorg_python(x_data, stride)
 
         x = relay.var("x", relay.TensorType(shape, "float32"))
         z = relay.vision.yolo_reorg(x, stride=stride)
@@ -658,7 +658,7 @@ def test_deformable_conv2d():
         data = np.random.uniform(size=data_shape).astype(dtype)
         offset = np.random.uniform(size=offset_shape).astype(dtype)
         kernel = np.random.uniform(size=kernel_shape).astype(dtype)
-        ref_res = topi.testing.deformable_conv2d_nchw_python(data, offset, kernel, stride=(1, 1), padding=(1, 1), dilation=(1, 1), deformable_groups=deformable_groups, groups=groups)
+        ref_res = tvm.topi.testing.deformable_conv2d_nchw_python(data, offset, kernel, stride=(1, 1), padding=(1, 1), dilation=(1, 1), deformable_groups=deformable_groups, groups=groups)
 
         for target, ctx in ctx_list():
             for kind in ["graph", "debug"]:
@@ -679,7 +679,7 @@ def test_depth_to_space():
         x_data = np.random.uniform(size=dshape).astype("float32")
         if layout == "NHWC":
             x_data = np.transpose(x_data, axes=[0, 3, 1, 2])
-        ref_res = topi.testing.depth_to_space_python(x_data, block_size, mode=mode)
+        ref_res = tvm.topi.testing.depth_to_space_python(x_data, block_size, mode=mode)
         if layout == "NHWC":
             x_data = np.transpose(x_data, axes=[0, 2, 3, 1])
             ref_res = np.transpose(ref_res, axes=[0, 2, 3, 1])
@@ -711,7 +711,7 @@ def test_space_to_depth():
         x_data = np.random.uniform(size=dshape).astype("float32")
         if layout == "NHWC":
             x_data = np.transpose(x_data, axes=[0, 3, 1, 2])
-        ref_res = topi.testing.space_to_depth_python(x_data, block_size)
+        ref_res = tvm.topi.testing.space_to_depth_python(x_data, block_size)
         if layout == "NHWC":
             x_data = np.transpose(x_data, axes=[0, 2, 3, 1])
             ref_res = np.transpose(ref_res, axes=[0, 2, 3, 1])
@@ -850,7 +850,7 @@ def test_affine_grid():
 
         func = relay.Function([data], y)
         data_np = np.random.uniform(size=data_shape).astype(dtype)
-        ref_res = topi.testing.affine_grid_python(data_np, target_shape)
+        ref_res = tvm.topi.testing.affine_grid_python(data_np, target_shape)
 
         for target, ctx in ctx_list():
             for kind in ["graph", "debug"]:
@@ -876,7 +876,7 @@ def test_grid_sample():
 
         data_np = np.random.uniform(size=data_shape).astype(dtype)
         grid_np = np.random.uniform(size=grid_shape, low=-1.5, high=1.5).astype(dtype)
-        ref_res = topi.testing.grid_sample_nchw_python(data_np, grid_np, method='bilinear')
+        ref_res = tvm.topi.testing.grid_sample_nchw_python(data_np, grid_np, method='bilinear')
 
         for target, ctx in ctx_list():
             for kind in ["graph", "debug"]:
index fb60e98..19025c7 100644 (file)
@@ -20,7 +20,7 @@ from tvm import te
 import numpy as np
 from tvm import relay
 from tvm.contrib import graph_runtime
-import topi.testing
+import tvm.topi.testing
 
 def test_same_io_qnn_params():
     data_dtype = 'int32'
index 6516871..4fbb4e9 100644 (file)
@@ -20,7 +20,7 @@ from tvm import te
 import numpy as np
 from tvm import relay
 from tvm.contrib import graph_runtime
-import topi.testing
+import tvm.topi.testing
 
 # "unquantize" a quantized tensor
 def recover(data, scale, zp):
index 77105f0..85dd2ed 100644 (file)
@@ -681,7 +681,7 @@ def test_alter_layout_depthwise_conv2d():
         y = relay.Function(analysis.free_vars(y), y)
         return y
 
-    import topi
+    from tvm import topi
     def alter_conv2d(attrs, inputs, tinfos, out_type):
         with tvm.target.create("llvm"):
             return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)
@@ -1016,7 +1016,7 @@ def test_alter_layout_sum():
 def test_alter_layout_nhwc_arm():
     """ Check that AlterOplayout does not alter NHWC data layout. """
     def alter_conv2d(attrs, inputs, tinfos, out_type):
-        import topi
+        from tvm import topi
         with tvm.target.create("llvm -device=arm_cpu"):
             return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)
 
@@ -1077,7 +1077,7 @@ def test_alter_layout_nhwc_int8_aarch64():
             self.memory[key] = cfg
 
     def alter_conv2d(attrs, inputs, tinfos, out_type):
-        import topi
+        from tvm import topi
         with tvm.target.create("llvm -device=arm_cpu -mtriple=aarch64-linux-gnu"):
             with Int8Fallback():
                 tmp =  topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)
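In these AlterOpLayout tests the import moves but deliberately stays function-local, so topi is resolved, and its layout-rewrite hook runs, inside the active target context. The shape of that pattern as it appears in the hunks above (the plain "llvm" target is one of the variants used here):

    import tvm

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        from tvm import topi  # local import, now resolved under the tvm namespace
        with tvm.target.create("llvm"):
            return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)
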
similarity index 98%
rename from topi/tests/python/common.py
rename to tests/python/topi/python/common.py
index eeaf632..735072c 100644 (file)
@@ -20,7 +20,7 @@ import tvm
 from tvm import te
 from tvm import autotvm
 from tvm.autotvm.task.space import FallbackConfigEntity
-import topi
+from tvm import topi
 
 def get_all_backend():
     """return all supported target
similarity index 95%
rename from topi/tests/python/test_fifo_buffer.py
rename to tests/python/topi/python/test_fifo_buffer.py
index 676c1f9..9af30f9 100644 (file)
@@ -18,8 +18,8 @@
 
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 import numpy as np
 from tvm.contrib.pickle_memoize import memoize
 
@@ -55,7 +55,7 @@ def verify_fifo_buffer(buffer_shape, data_shape, axis, dtype='float32'):
 
         with tvm.target.create(device):
             out = topi.nn.fifo_buffer(data, buffer, axis=axis)
-            s = topi.testing.get_injective_schedule(device)([out])
+            s = tvm.topi.testing.get_injective_schedule(device)([out])
 
         buffer_tvm = tvm.nd.array(buffer_np, ctx=ctx)
         data_tvm = tvm.nd.array(data_np, ctx=ctx)
@@ -129,11 +129,11 @@ def verify_conv1d_integration():
             return
         print('  Running on target: {}'.format(device))
 
-        conv2d_nchw, schedule_conv2d_nchw = topi.testing.get_conv2d_nchw_implement(device)
+        conv2d_nchw, schedule_conv2d_nchw = tvm.topi.testing.get_conv2d_nchw_implement(device)
 
         with tvm.target.create(device):
             out = topi.nn.fifo_buffer(inc_input, context, axis=buffer_axis)
-            s = topi.testing.get_injective_schedule(device)([out])
+            s = tvm.topi.testing.get_injective_schedule(device)([out])
             update_context = tvm.build(s, [inc_input, context, out], device, name='update_context')
 
             out = conv2d_nchw(context, kernel, stride, padding, dilate, dtype)
@@ -141,12 +141,12 @@ def verify_conv1d_integration():
             conv2d_inc = tvm.build(s, [context, kernel, out], device, name='conv2d_inc')
 
             out = topi.nn.fifo_buffer(inc_output, output_window, axis=buffer_axis)
-            s = topi.testing.get_injective_schedule(device)([out])
+            s = tvm.topi.testing.get_injective_schedule(device)([out])
             update_output_window = tvm.build(s, [inc_output, output_window, out], device,
                  name='update_output_window')
 
             out = topi.nn.fifo_buffer(inc_input, input_window, axis=buffer_axis)
-            s = topi.testing.get_injective_schedule(device)([out])
+            s = tvm.topi.testing.get_injective_schedule(device)([out])
             update_input_window = tvm.build(s, [inc_input, input_window, out], device,
                                             name='update_input_window')
 
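The relocated fifo-buffer test shows the split that recurs throughout these files: compute operators come from tvm.topi (here topi.nn.fifo_buffer), while the per-target schedule lookup comes from tvm.topi.testing. A minimal build-sequence sketch under that convention; the "llvm" target and the toy buffer shapes are assumptions for illustration:

    import tvm
    from tvm import te, topi
    import tvm.topi.testing

    device = "llvm"
    inc_input = te.placeholder((1, 4), name="inc_input")   # new samples entering the FIFO
    context = te.placeholder((1, 16), name="context")      # FIFO state being shifted
    with tvm.target.create(device):
        out = topi.nn.fifo_buffer(inc_input, context, axis=1)
        s = tvm.topi.testing.get_injective_schedule(device)([out])
    update_context = tvm.build(s, [inc_input, context, out], device, name='update_context')
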
similarity index 97%
rename from topi/tests/python/test_topi_basic.py
rename to tests/python/topi/python/test_topi_basic.py
index a83ff50..e1e5cf8 100644 (file)
@@ -16,8 +16,8 @@
 # under the License.
 import tvm
 from tvm import te
-import topi
-from topi import util
+from tvm import topi
+from tvm.topi import util
 
 
 def test_util():
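Utility helpers follow the same move (topi.util -> tvm.topi.util). The helper these tests lean on most is get_const_tuple, which converts a TVM shape of IntImm nodes into a plain Python tuple; a minimal sketch:

    from tvm import te
    from tvm.topi.util import get_const_tuple

    A = te.placeholder((2, 3), name="A")
    # A.shape is an Array of tvm IntImm; get_const_tuple makes it a tuple of plain ints
    assert get_const_tuple(A.shape) == (2, 3)
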
@@ -18,9 +18,9 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
 from tvm.contrib.pickle_memoize import memoize
 
 from common import get_all_backend
@@ -41,7 +41,7 @@ def verify_batch_matmul(batch, M, N, K):
     def get_ref_data():
         a_np = np.random.uniform(size=(batch, M, K)).astype(dtype)
         b_np = np.random.uniform(size=(batch, N, K)).astype(dtype)
-        c_np = topi.testing.batch_matmul(a_np, b_np)
+        c_np = tvm.topi.testing.batch_matmul(a_np, b_np)
         return (a_np, b_np, c_np)
     # get the test data
     a_np, b_np, c_np = get_ref_data()
@@ -53,7 +53,7 @@ def verify_batch_matmul(batch, M, N, K):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _batch_matmul_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _batch_matmul_implement)
             out = fcompute(x, y)
             s = fschedule([out])
         a = tvm.nd.array(a_np, ctx)
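tvm.topi.testing.dispatch(device, table) picks a (compute, schedule) pair out of a per-target table such as the _batch_matmul_implement referenced above. The table itself sits outside this hunk, so the sketch below reconstructs a plausible one from the pattern; the exact keys and entries are assumptions, not copied from the test:

    from tvm import topi
    import tvm.topi.testing

    # hypothetical per-target implementation table, keyed like the ones in these tests
    _batch_matmul_implement = {
        "generic": (topi.nn.batch_matmul, topi.generic.schedule_batch_matmul),
        "cpu": (topi.x86.batch_matmul, topi.x86.schedule_batch_matmul),
        "gpu": (topi.cuda.batch_matmul, topi.cuda.schedule_batch_matmul),
    }

    device = "llvm"
    fcompute, fschedule = tvm.topi.testing.dispatch(device, _batch_matmul_implement)
    # fcompute(x, y) builds the op; fschedule([out]) schedules it, as in the hunk above
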
@@ -17,9 +17,9 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
 from tvm.contrib.pickle_memoize import memoize
 
 def generate_quantized_np(shape, bits, out_dtype):
@@ -51,9 +51,9 @@ def verify_bitserial_conv2d_nchw(batch, in_size, in_channel, num_filter, kernel,
             w_ = np.copy(w_np).astype(out_dtype)
             for x in np.nditer(w_, op_flags=['readwrite']):
                 x[...] = 1 if x == 1 else -1
-            b_np = topi.testing.conv2d_nchw_python(a_np.astype(out_dtype), w_, stride, padding)
+            b_np = tvm.topi.testing.conv2d_nchw_python(a_np.astype(out_dtype), w_, stride, padding)
         else:
-            b_np = topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding)
+            b_np = tvm.topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding)
         return a_np, w_np, b_np
     a_np, w_np, b_np = get_ref_data()
 
@@ -89,9 +89,9 @@ def verify_bitserial_conv2d_nhwc(batch, in_size, in_channel, num_filter, kernel,
             w_ = np.copy(w_np).astype(out_dtype)
             for x in np.nditer(w_, op_flags=['readwrite']):
                 x[...] = 1 if x == 1 else -1
-            b_np = topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
+            b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
         else:
-            b_np = topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(out_dtype)
+            b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(out_dtype)
         return a_np, w_np, b_np
     a_np, w_np, b_np = get_ref_data()
 
@@ -19,9 +19,9 @@ import re
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
 
 def generate_quantized_np(shape, bits, out_dtype):
     np.random.seed(0)
@@ -68,9 +68,9 @@ def verify_bitserial_conv2d_nhwc(batch, in_size, in_channel, num_filter, kernel,
             w_ = np.copy(w_np).astype(out_dtype)
             for x in np.nditer(w_, op_flags=['readwrite']):
                 x[...] = 1 if x == 1 else -1
-            b_np = topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
+            b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
         else:
-            b_np = topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(out_dtype)
+            b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(out_dtype)
         return a_np, w_np, b_np
     a_np, w_np, b_np = get_ref_data()
     a = tvm.nd.array(a_np, ctx)
@@ -19,9 +19,9 @@ import os
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
 from tvm.contrib.pickle_memoize import memoize
 
 _bitserial_dense_implement = {
@@ -57,7 +57,7 @@ def verify_bitserial_dense(batch, in_dim, out_dim, activation_bits, weight_bits,
         input_dtype = 'uint8' if "arm_cpu" in target else "uint32"
         A = te.placeholder((batch, in_dim), dtype=input_dtype, name='A')
         B = te.placeholder((out_dim, in_dim), dtype=input_dtype, name='B')
-        fcompute, fschedule = topi.testing.dispatch(target, _bitserial_dense_implement)
+        fcompute, fschedule = tvm.topi.testing.dispatch(target, _bitserial_dense_implement)
         C = fcompute(A, B, activation_bits, weight_bits,
                      input_dtype, out_dtype, unipolar)
         s = fschedule([C])
similarity index 97%
rename from topi/tests/python/test_topi_bnn.py
rename to tests/python/topi/python/test_topi_bnn.py
index 275f34f..ac16460 100644 (file)
@@ -18,8 +18,8 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-from topi.util import get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
 from tvm.contrib.pickle_memoize import memoize
 
 
similarity index 97%
rename from topi/tests/python/test_topi_broadcast.py
rename to tests/python/topi/python/test_topi_broadcast.py
index f3e0300..4ac985e 100644 (file)
@@ -18,8 +18,8 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from common import get_all_backend
 
 
@@ -35,7 +35,7 @@ def verify_broadcast_to_ele(in_shape, out_shape, fbcast):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_broadcast_schedule(device)(B)
+            s = tvm.topi.testing.get_broadcast_schedule(device)(B)
         foo = tvm.build(s, [A, B], device, name="broadcast_to")
         data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
         out_npy = np.broadcast_to(data_npy, out_shape)
@@ -83,7 +83,7 @@ def verify_broadcast_binary_ele(lhs_shape, rhs_shape,
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_broadcast_schedule(device)(C)
+            s = tvm.topi.testing.get_broadcast_schedule(device)(C)
         foo = tvm.build(s, [A, B, C], device, name="broadcast_binary" + "_" + ftopi.__name__)
 
         lhs_npy, lhs_nd = gen_operand(lhs_shape, lhs_min, lhs_max, ctx)
@@ -245,7 +245,7 @@ def test_logical_single_ele():
                 return
             print("Running on target: %s" % device)
             with tvm.target.create(device):
-                s = topi.testing.get_broadcast_schedule(device)(B)
+                s = tvm.topi.testing.get_broadcast_schedule(device)(B)
             foo = tvm.build(s, [A, B], device, name=name)
 
             data_npy = indata.astype(A.dtype)
@@ -286,7 +286,7 @@ def test_bitwise_not():
                 return
             print("Running on target: %s" % device)
             with tvm.target.create(device):
-                s = topi.testing.get_broadcast_schedule(device)(B)
+                s = tvm.topi.testing.get_broadcast_schedule(device)(B)
             foo = tvm.build(s, [A, B], device, name=name)
 
             data_npy = np.random.uniform(size=shape).astype(A.dtype)
@@ -328,7 +328,7 @@ def test_logical_binary_ele():
                 return
             print("Running on target: %s" % device)
             with tvm.target.create(device):
-                s = topi.testing.get_broadcast_schedule(device)(C)
+                s = tvm.topi.testing.get_broadcast_schedule(device)(C)
             foo = tvm.build(s, [A, B, C], device, name=name)
 
             lhs_nd = tvm.nd.array(lhs, ctx)
similarity index 93%
rename from topi/tests/python/test_topi_clip.py
rename to tests/python/topi/python/test_topi_clip.py
index 38617ee..b3d95dd 100644 (file)
@@ -18,9 +18,9 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
 from tvm.contrib.pickle_memoize import memoize
 
 from common import get_all_backend
@@ -45,7 +45,7 @@ def verify_clip(N, a_min, a_max, dtype):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
 
         a = tvm.nd.array(a_np, ctx)
         b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), ctx)
similarity index 92%
rename from topi/tests/python/test_topi_conv1d.py
rename to tests/python/topi/python/test_topi_conv1d.py
index 972a3f1..49f2cd1 100644 (file)
@@ -19,10 +19,10 @@ import numpy as np
 import itertools
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 from common import get_all_backend
 
 
@@ -67,7 +67,7 @@ def verify_conv1d(batch,
         else:
             np_in = a_np
             np_w = w_np
-        b_np = topi.testing.conv1d_ncw_python(np_in, np_w, stride, padding, dilation)
+        b_np = tvm.topi.testing.conv1d_ncw_python(np_in, np_w, stride, padding, dilation)
         if layout == 'NWC':
             b_np = np.transpose(b_np, [0, 2, 1])
         return a_np, w_np, b_np
@@ -80,9 +80,9 @@ def verify_conv1d(batch,
             print("Skip because %s is not enabled" % device)
             return
         if layout == "NCW":
-            fcompute, fschedule = topi.testing.dispatch(device, _conv1d_ncw_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv1d_ncw_implement)
         else:
-            fcompute, fschedule = topi.testing.dispatch(device, _conv1d_nwc_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv1d_nwc_implement)
         with tvm.target.create(device):
             B = fcompute(A, W, stride, padding, dilation, 'float32')
             s = fschedule([B])
@@ -19,10 +19,10 @@ import numpy as np
 import itertools
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 from common import get_all_backend
 
 _conv1d_transpose_ncw_implement = {
@@ -43,7 +43,7 @@ def verify_conv1d_transpose_ncw(batch, in_channel, in_size, num_filter, kernel,
     def get_ref_data():
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
-        b_np = topi.testing.conv1d_transpose_ncw_python(a_np, w_np, stride, padding, output_padding)
+        b_np = tvm.topi.testing.conv1d_transpose_ncw_python(a_np, w_np, stride, padding, output_padding)
         c_np = np.maximum(b_np, 0)
         return a_np, w_np, b_np, c_np
 
@@ -55,7 +55,7 @@ def verify_conv1d_transpose_ncw(batch, in_channel, in_size, num_filter, kernel,
             print("Skip because %s is not enabled" % device)
             return
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _conv1d_transpose_ncw_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv1d_transpose_ncw_implement)
             B = fcompute(A, W, stride, padding, A.dtype, output_padding)
             C = topi.nn.relu(B)
             s1 = fschedule([B])
@@ -20,11 +20,11 @@ import numpy as np
 import tvm
 from tvm import te
 from tvm import autotvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.nn.util import get_pad_tuple
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
 
 from common import get_all_backend
 
@@ -81,8 +81,8 @@ def verify_conv2d_NCHWc(batch, in_channel, in_size, num_filter, kernel, stride,
         a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype(dtype)
         w_np = np.random.uniform(size=(num_filter, in_channel, kernel, kernel)).astype(dtype)
         b_np = np.random.uniform(size=(num_filter, 1, 1)).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
-        c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
         if add_bias:
             c_np += b_np
         if add_relu:
@@ -19,10 +19,10 @@ import os
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 
 
 _conv2d_hwcn_implement = {
@@ -48,8 +48,8 @@ def verify_conv2d_hwcn(batch, in_channel, in_size, num_filter, kernel, stride, p
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=b_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
-        c1_np = topi.testing.conv2d_hwcn_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
+        c1_np = tvm.topi.testing.conv2d_hwcn_python(a_np, dw_np, stride, padding)
         c2_np = c1_np + b_np
         c3_np = np.maximum(c2_np, 0)
         return a_np, w_np, b_np, c1_np, c2_np, c3_np
@@ -63,7 +63,7 @@ def verify_conv2d_hwcn(batch, in_channel, in_size, num_filter, kernel, stride, p
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _conv2d_hwcn_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv2d_hwcn_implement)
             t_conv = fcompute(A, W, stride, padding, dilation)
             t_bias = topi.add(t_conv, B)
             t_relu = topi.nn.relu(t_bias)
@@ -21,12 +21,12 @@ import tvm
 from tvm import te
 from tvm import autotvm
 from tvm.autotvm.task.space import FallbackConfigEntity
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.nn.util import get_pad_tuple
-from topi.util import get_const_tuple
-from topi.arm_cpu.conv2d_gemm import is_aarch64_arm
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
+from tvm.topi.arm_cpu.conv2d_gemm import is_aarch64_arm
 
 from common import get_all_backend, Int8Fallback
 
@@ -113,8 +113,8 @@ def verify_conv2d_NHWC_gemm_int8(batch, in_channel, in_size, num_filter, kernel,
         a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
         w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
-        c_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding).astype(dtype)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
+        c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding).astype(dtype)
 
         if add_bias:
             b_np = np.random.uniform(size=bias_shape).astype(dtype)
@@ -203,8 +203,8 @@ def verify_conv2d_NCHWc_int8(batch, in_channel, in_size, num_filter, kernel, str
         a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
         w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
-        c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding).astype(dtype)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding).astype(dtype)
 
         # convert to NCHWc
         _, _, out_height, out_width = c_np.shape
@@ -278,8 +278,8 @@ def verify_conv2d_nchw_int8(batch, in_channel, in_size, num_filter, kernel, stri
         a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
         w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
-        c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding).astype(dtype)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding).astype(dtype)
 
         if add_bias:
             b_np = np.random.uniform(size=bias_shape).astype(dtype)
@@ -20,11 +20,11 @@ import numpy as np
 import tvm
 from tvm import te
 from tvm import autotvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.nn.util import get_pad_tuple
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
 
 from common import get_all_backend
 
@@ -51,8 +51,8 @@ def verify_conv2d_nchw(batch, in_channel, in_size, num_filter, kernel, stride, p
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
-        c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
         if add_bias:
             c_np += b_np
         if add_relu:
@@ -71,7 +71,7 @@ def verify_conv2d_nchw(batch, in_channel, in_size, num_filter, kernel, stride, p
         if "cudnn" in device:
             fcompute, fschedule = topi.cuda.conv2d_cudnn, topi.cuda.schedule_conv2d_cudnn
         else:
-            fcompute, fschedule = topi.testing.get_conv2d_nchw_implement(device)
+            fcompute, fschedule = tvm.topi.testing.get_conv2d_nchw_implement(device)
 
         with tvm.target.create(device):
             if "cudnn" in device:
@@ -19,10 +19,10 @@ import os
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 
 
 
@@ -50,8 +50,8 @@ def verify_conv2d_nhwc(batch, in_channel, in_size, num_filter, kernel, stride, p
     def get_ref_data():
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
-        b_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
+        b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
         return a_np, w_np, b_np
     a_np, w_np, b_np = get_ref_data()
 
@@ -61,7 +61,7 @@ def verify_conv2d_nhwc(batch, in_channel, in_size, num_filter, kernel, stride, p
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _conv2d_nhwc_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv2d_nhwc_implement)
             B = fcompute(A, W, stride, padding, dilation, dtype)
             s = fschedule([B])
         ctx = tvm.context(device, 0)
@@ -22,10 +22,10 @@ import tvm
 from tvm import te
 from tvm import autotvm
 from tvm.autotvm.task.space import FallbackConfigEntity
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 
 
 def verify_conv2d_1x1_nhwc_pack_int8(batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation=1):
@@ -43,8 +43,8 @@ def verify_conv2d_1x1_nhwc_pack_int8(batch, in_channel, in_size, num_filter, ker
     def get_ref_data():
         a_np = np.random.uniform(size=a_shape).astype(adtype)
         w_np = np.random.uniform(size=w_shape).astype(wdtype)
-        dw_np = topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
-        b_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
+        b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
         return a_np, w_np, b_np
 
     a_np, w_np, b_np = get_ref_data()
 
 import numpy as np
 import tvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm import te
 from tvm.contrib.pickle_memoize import memoize
 from tvm.contrib import nvcc
-from topi.nn.util import get_pad_tuple
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
 
 
 _conv2d_nhwc_tensorcore_implement = {
@@ -57,8 +57,8 @@ def verify_conv2d_nhwc(batch, in_channel, in_size, num_filter, kernel, stride,
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
-        c_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+        c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
         if add_bias:
             b_np = np.random.uniform(size=bias_shape).astype(dtype)
             c_np += b_np
@@ -78,7 +78,7 @@ def verify_conv2d_nhwc(batch, in_channel, in_size, num_filter, kernel, stride,
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _conv2d_nhwc_tensorcore_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv2d_nhwc_tensorcore_implement)
             C = fcompute(A, W, stride, padding, dilation, 'float32')
             if add_bias:
                 C = topi.add(C, bias)
 
 import numpy as np
 import tvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm import te
 from tvm.contrib.pickle_memoize import memoize
 from tvm.contrib import nvcc
-from topi.nn.util import get_pad_tuple
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
 
 
 _conv2d_nhwc_winograd_tensorcore = {
@@ -65,8 +65,8 @@ def verify_conv2d_nhwc(batch, in_channel, in_size, num_filter, kernel, stride,
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
-        c_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
+        c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
         if add_bias:
             b_np = np.random.uniform(size=bias_shape).astype(dtype)
             c_np += b_np
@@ -84,10 +84,10 @@ def verify_conv2d_nhwc(batch, in_channel, in_size, num_filter, kernel, stride,
         print("Running on target: %s" % device)
         with tvm.target.create(device):
             if bgemm == "direct":
-                fcompute, fschedule = topi.testing.dispatch(device,
+                fcompute, fschedule = tvm.topi.testing.dispatch(device,
                                                             _conv2d_nhwc_winograd_direct)
             elif bgemm == "tensorcore":
-                fcompute, fschedule = topi.testing.dispatch(device,
+                fcompute, fschedule = tvm.topi.testing.dispatch(device,
                                                             _conv2d_nhwc_winograd_tensorcore)
             C = fcompute(A, W, stride, padding, dilation, 'float32')
             if add_bias:
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 
 from common import get_all_backend
 
@@ -51,7 +51,7 @@ def verify_conv2d_transpose_nchw(batch, in_channel, in_size, num_filter, kernel,
     def get_ref_data():
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
-        b_np = topi.testing.conv2d_transpose_nchw_python(a_np, w_np, stride, padding, output_padding)
+        b_np = tvm.topi.testing.conv2d_transpose_nchw_python(a_np, w_np, stride, padding, output_padding)
         c_np = np.maximum(b_np, 0)
         return a_np, w_np, b_np, c_np
 
@@ -64,7 +64,7 @@ def verify_conv2d_transpose_nchw(batch, in_channel, in_size, num_filter, kernel,
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _conv2d_transpose_nchw_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv2d_transpose_nchw_implement)
             B = fcompute(A, W,
                          [stride_height, stride_width],
                          [pad_top, pad_left, pad_bottom, pad_right],
@@ -21,11 +21,11 @@ import tvm
 from tvm import te
 from tvm import autotvm
 from tvm.autotvm.task.space import FallbackConfigEntity
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.nn.util import get_pad_tuple
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
 
 
 _conv2d_nchw_winograd_implement = {
@@ -57,8 +57,8 @@ def verify_conv2d_nchw(batch, in_channel, in_size, num_filter, kernel, stride, p
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
-        c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
         if add_bias:
             b_np = np.random.uniform(size=bias_shape).astype(dtype)
             c_np += b_np
@@ -75,7 +75,7 @@ def verify_conv2d_nchw(batch, in_channel, in_size, num_filter, kernel, stride, p
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _conv2d_nchw_winograd_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv2d_nchw_winograd_implement)
             C = fcompute(A, W, stride, padding, dilation, dtype)
             if add_bias:
                 C = topi.add(C, bias)
@@ -20,11 +20,11 @@ import numpy as np
 import tvm
 from tvm import te
 from tvm import autotvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.nn.util import get_pad_tuple3d
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple3d
+from tvm.topi.util import get_const_tuple
 
 from common import get_all_backend
 
@@ -56,8 +56,8 @@ def verify_conv3d_ncdhw(batch, in_channel, in_size, num_filter, kernel, stride,
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation, dilation))
-        c_np = topi.testing.conv3d_ncdhw_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation, dilation))
+        c_np = tvm.topi.testing.conv3d_ncdhw_python(a_np, dw_np, stride, padding)
         if add_bias:
             c_np += b_np
         if add_relu:
@@ -72,7 +72,7 @@ def verify_conv3d_ncdhw(batch, in_channel, in_size, num_filter, kernel, stride,
             print("Skip because %s is not enabled" % device)
             return
         print("Running on target: %s" % device)
-        fcompute, fschedule = topi.testing.dispatch(device, _conv3d_ncdhw_implement)
+        fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv3d_ncdhw_implement)
         with tvm.target.create(device):
             C = fcompute(A, W, (stride, stride, stride), padding,
                          (dilation, dilation, dilation), dtype)
@@ -19,10 +19,10 @@ import os
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 
 from common import get_all_backend
 
@@ -53,8 +53,8 @@ def verify_conv3d_ndhwc(batch, in_channel, in_size, num_filter, kernel, stride,
     def get_ref_data():
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (dilation, dilation, dilation, 1, 1))
-        b_np = topi.testing.conv3d_ndhwc_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, dilation, 1, 1))
+        b_np = tvm.topi.testing.conv3d_ndhwc_python(a_np, dw_np, stride, padding)
         return a_np, w_np, b_np
     a_np, w_np, b_np = get_ref_data()
 
@@ -64,7 +64,7 @@ def verify_conv3d_ndhwc(batch, in_channel, in_size, num_filter, kernel, stride,
             print("Skip because %s is not enabled" % device)
             return
         print("Running on target: %s" % device)
-        fcompute, fschedule = topi.testing.dispatch(device, _conv3d_ndhwc_implement)
+        fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv3d_ndhwc_implement)
         with tvm.target.create(device):
             B = fcompute(A, W, stride, padding, dilation, dtype)
             s = fschedule([B])
 
 import numpy as np
 import tvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm import te
 from tvm.contrib.pickle_memoize import memoize
 from tvm.contrib import nvcc
-from topi.nn.util import get_pad_tuple3d
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple3d
+from tvm.topi.util import get_const_tuple
 
 
 _conv3d_ndhwc_tensorcore_implement = {
@@ -58,8 +58,8 @@ def verify_conv3d_ndhwc(batch, in_channel, in_size, num_filter, kernel, stride,
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, 1, dilation, dilation))
-        c_np = topi.testing.conv3d_ndhwc_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, 1, dilation, dilation))
+        c_np = tvm.topi.testing.conv3d_ndhwc_python(a_np, dw_np, stride, padding)
         if add_bias:
             b_np = np.random.uniform(size=bias_shape).astype(dtype)
             c_np += b_np
@@ -79,7 +79,7 @@ def verify_conv3d_ndhwc(batch, in_channel, in_size, num_filter, kernel, stride,
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _conv3d_ndhwc_tensorcore_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv3d_ndhwc_tensorcore_implement)
             C = fcompute(A, W, stride, padding, dilation, 'float32')
             if add_bias:
                 C = topi.add(C, bias)
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 
 from common import get_all_backend
 
@@ -49,7 +49,7 @@ def verify_conv3d_transpose_ncdhw(batch, in_channel, in_size, num_filter, kernel
     def get_ref_data():
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
-        b_np = topi.testing.conv3d_transpose_ncdhw_python(a_np, w_np, stride, padding)
+        b_np = tvm.topi.testing.conv3d_transpose_ncdhw_python(a_np, w_np, stride, padding)
         c_np = np.maximum(b_np, 0)
         return a_np, w_np, b_np, c_np
 
@@ -62,7 +62,7 @@ def verify_conv3d_transpose_ncdhw(batch, in_channel, in_size, num_filter, kernel
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _conv3d_transpose_ncdhw_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv3d_transpose_ncdhw_implement)
             B = fcompute(A, W,
                          [stride_depth, stride_height, stride_width],
                          [pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right],
@@ -20,11 +20,11 @@ import numpy as np
 import tvm
 from tvm import te
 from tvm import autotvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.nn.util import get_pad_tuple3d
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple3d
+from tvm.topi.util import get_const_tuple
 
 from common import get_all_backend
 
@@ -66,8 +66,8 @@ def verify_conv3d_ncdhw(batch,
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation, dilation))
-        c_np = topi.testing.conv3d_ncdhw_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation, dilation))
+        c_np = tvm.topi.testing.conv3d_ncdhw_python(a_np, dw_np, stride, padding)
         if add_bias:
             c_np += b_np
         if add_relu:
@@ -82,7 +82,7 @@ def verify_conv3d_ncdhw(batch,
             print("Skip because %s is not enabled" % device)
             return
         print("Running on target: %s" % device)
-        fcompute, fschedule = topi.testing.dispatch(device, _conv3d_ncdhw_implement)
+        fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv3d_ncdhw_implement)
         with tvm.target.create(device):
             C = fcompute(A, W, (stride, stride, stride), padding, (dilation, dilation, dilation),
                          dtype)
@@ -19,10 +19,10 @@ import numpy as np
 import tvm
 from tvm import te
 from tvm import autotvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 
 from common import get_all_backend
 
@@ -47,7 +47,7 @@ def verify_correlation_nchw(data_shape, kernel_size, max_displacement, stride1,
     def get_ref_data():
         a_np = np.random.uniform(size=data_shape).astype(dtype)
         b_np = np.random.uniform(size=data_shape).astype(dtype)
-        c_np = topi.testing.correlation_nchw_python(a_np, b_np, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply)
+        c_np = tvm.topi.testing.correlation_nchw_python(a_np, b_np, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply)
         return a_np, b_np, c_np
 
     a_np, b_np, c_np = get_ref_data()
@@ -58,7 +58,7 @@ def verify_correlation_nchw(data_shape, kernel_size, max_displacement, stride1,
             print("Skip because %s is not enabled" % device)
             return
         print("Running on target: %s" % device)
-        fcompute, fschedule = topi.testing.dispatch(
+        fcompute, fschedule = tvm.topi.testing.dispatch(
             device, _correlation_implement)
         with tvm.target.create(device):
             C = fcompute(A, B, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply)
@@ -18,10 +18,10 @@ import numpy as np
 import tvm
 from tvm import te
 from tvm import autotvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 
 from common import get_all_backend
 
@@ -53,7 +53,7 @@ def verify_deformable_conv2d_nchw(batch, in_channel, in_size, num_filter, kernel
         offset_np = np.random.randn(*offset_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        c_np = topi.testing.deformable_conv2d_nchw_python(a_np, offset_np, w_np, stride, padding,
+        c_np = tvm.topi.testing.deformable_conv2d_nchw_python(a_np, offset_np, w_np, stride, padding,
                                                           dilation, deformable_groups, groups)
 
         return a_np, offset_np, w_np, c_np
@@ -66,7 +66,7 @@ def verify_deformable_conv2d_nchw(batch, in_channel, in_size, num_filter, kernel
             print("Skip because %s is not enabled" % device)
             return
         print("Running on target: %s" % device)
-        fcompute, fschedule = topi.testing.dispatch(device, _deformable_conv2d_implement)
+        fcompute, fschedule = tvm.topi.testing.dispatch(device, _deformable_conv2d_implement)
         with tvm.target.create(device):
             C = fcompute(A, Offset, W, stride, padding, dilation,
                          deformable_groups, groups, dtype)
similarity index 97%
rename from topi/tests/python/test_topi_dense.py
rename to tests/python/topi/python/test_topi_dense.py
index 6294c7d..517cb4d 100644 (file)
@@ -18,9 +18,9 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
 from tvm.contrib.pickle_memoize import memoize
 
 from common import get_all_backend, Int8Fallback
@@ -63,7 +63,7 @@ def verify_dense(batch, in_dim, out_dim, use_bias=True):
             print("Skip because %s is not enabled" % device)
             return
         print("Running on target: %s" % device)
-        for fcompute, fschedule in topi.testing.dispatch(device, _dense_implement):
+        for fcompute, fschedule in tvm.topi.testing.dispatch(device, _dense_implement):
             with tvm.target.create(device):
                 D = fcompute(A, B, C if use_bias else None)
                 D = topi.nn.relu(D)
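
Note the for-loop here: unlike the conv tables above, which map a target key to a single (compute, schedule) pair, the dense table maps each target to a list of implementations, and dispatch returns the whole list. A hedged sketch of such a table (the single generic entry is illustrative, not the file's full table):

    from tvm import topi
    import tvm.topi.testing

    # One list of (compute, schedule) pairs per target key.
    _dense_implement = {
        "generic": [(topi.nn.dense, topi.generic.schedule_dense)],
    }
    for fcompute, fschedule in tvm.topi.testing.dispatch("llvm", _dense_implement):
        print(fcompute, fschedule)
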
@@ -18,9 +18,9 @@
 """Test code for dense tensorcore operator"""
 import numpy as np
 import tvm
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
 from tvm import te
 from tvm.contrib.pickle_memoize import memoize
 from tvm.contrib import nvcc
@@ -60,7 +60,7 @@ def verify_dense(batch, in_dim, out_dim, use_bias=True):
             print("skip because gpu does not support Tensor Cores")
             return
         print("Running on target: %s" % device)
-        for fcompute, fschedule in topi.testing.dispatch(device, _dense_implement):
+        for fcompute, fschedule in tvm.topi.testing.dispatch(device, _dense_implement):
             with tvm.target.create(device):
                 D = fcompute(A, B, C if use_bias else None)
                 D = topi.nn.relu(D)
@@ -18,8 +18,8 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 
 from common import get_all_backend
 
@@ -45,7 +45,7 @@ def verify_depth_to_space(block_size, batch, in_channel, in_height, in_width, la
     B = topi.nn.depth_to_space(A, block_size=block_size, layout=layout, mode=mode)
     if layout == 'NHWC':
         a_np = np.transpose(a_np, axes=[0, 3, 1, 2])
-    b_np = topi.testing.depth_to_space_python(a_np, block_size, mode=mode)
+    b_np = tvm.topi.testing.depth_to_space_python(a_np, block_size, mode=mode)
     if layout == 'NHWC':
         a_np = np.transpose(a_np, axes=[0, 2, 3, 1])
         b_np = np.transpose(b_np, axes=[0, 2, 3, 1])
@@ -57,7 +57,7 @@ def verify_depth_to_space(block_size, batch, in_channel, in_height, in_width, la
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         a = tvm.nd.array(a_np, ctx)
         b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx)
         f = tvm.build(s, [A, B], device)
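
get_injective_schedule(device) returns the per-target schedule function these element-wise tests all rely on. A self-contained sketch of the check_device pattern on the llvm target (the relu operator is just an arbitrary injective example):

    import numpy as np
    import tvm
    from tvm import te, topi
    import tvm.topi.testing

    device = "llvm"
    A = te.placeholder((4, 4), name="A")
    B = topi.nn.relu(A)
    with tvm.target.create(device):
        s = tvm.topi.testing.get_injective_schedule(device)(B)
    f = tvm.build(s, [A, B], device)
    ctx = tvm.context(device, 0)
    a = tvm.nd.array(np.random.uniform(-1, 1, (4, 4)).astype("float32"), ctx)
    b = tvm.nd.array(np.zeros((4, 4), dtype="float32"), ctx)
    f(a, b)
    np.testing.assert_allclose(b.asnumpy(), np.maximum(a.asnumpy(), 0))
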
 import tvm
 from tvm import te
 from tvm import autotvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 import numpy as np
-from topi.util import get_const_tuple
-from topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
 from tvm.contrib.pickle_memoize import memoize
 
 from common import get_all_backend
@@ -73,7 +73,7 @@ def depthwise_conv2d_with_workload_nchw(batch, in_channel, in_height, channel_mu
             return
         print("Running on target: %s" % device)
 
-        impl_list = topi.testing.dispatch(device, _depthwise_conv2d_nchw_implement)[:]
+        impl_list = tvm.topi.testing.dispatch(device, _depthwise_conv2d_nchw_implement)[:]
         if device == "llvm" and channel_multiplier == 1 and dilation == 1:
             impl_list.append((topi.x86.depthwise_conv2d_nchw, topi.x86.schedule_depthwise_conv2d_nchw))
 
@@ -105,11 +105,11 @@ def depthwise_conv2d_with_workload_nchw(batch, in_channel, in_height, channel_mu
             def get_ref_data():
                 input_np = np.random.uniform(size=input_shape).astype(dtype)
                 filter_np = np.random.uniform(size=filter_shape).astype(dtype)
-                dilated_filter_np = topi.testing.dilate_python(filter_np, (1, 1, dilation, dilation))
+                dilated_filter_np = tvm.topi.testing.dilate_python(filter_np, (1, 1, dilation, dilation))
                 scale_np = np.random.uniform(size=scale_shape).astype(dtype)
                 shift_np = np.random.uniform(size=shift_shape).astype(dtype)
                 # correctness with scipy
-                depthwise_conv2d_scipy = topi.testing.depthwise_conv2d_python_nchw(
+                depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nchw(
                     input_np, dilated_filter_np, stride, padding)
                 scale_shift_scipy = np.zeros(shape=scale_shift_shape)
                 for c in range(in_channel * channel_multiplier):
@@ -176,7 +176,7 @@ def depthwise_conv2d_with_workload_nhwc(batch, in_channel, in_height, channel_mu
             return
         print("Running on target: %s" % device)
 
-        fcompute, fschedule = topi.testing.dispatch(device, _depthwise_conv2d_nhwc_implement)
+        fcompute, fschedule = tvm.topi.testing.dispatch(device, _depthwise_conv2d_nhwc_implement)
         with tvm.target.create(device):
             # declare
             DepthwiseConv2d = fcompute(Input, Filter,
@@ -204,11 +204,11 @@ def depthwise_conv2d_with_workload_nhwc(batch, in_channel, in_height, channel_mu
         def get_ref_data():
             input_np = np.random.uniform(size=input_shape).astype(dtype)
             filter_np = np.random.uniform(size=filter_shape).astype(dtype)
-            dilated_filter_np = topi.testing.dilate_python(filter_np, (dilation, dilation, 1, 1))
+            dilated_filter_np = tvm.topi.testing.dilate_python(filter_np, (dilation, dilation, 1, 1))
             scale_np = np.random.uniform(size=scale_shape).astype(dtype)
             shift_np = np.random.uniform(size=shift_shape).astype(dtype)
             # correctness with scipy
-            depthwise_conv2d_scipy = topi.testing.depthwise_conv2d_python_nhwc(
+            depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nhwc(
                 input_np, dilated_filter_np, stride=[stride_h, stride_w], padding=padding)
             scale_shift_scipy = np.zeros(shape=scale_shift_shape)
             for c in range(in_channel * channel_multiplier):
@@ -329,7 +329,7 @@ def depthwise_conv2d_with_workload_NCHWc(batch, in_channel, in_height, channel_m
             input_np = np.random.uniform(size=input_shape).astype(dtype)
             filter_np = np.random.uniform(size=filter_shape).astype(dtype)
             # correctness with scipy
-            depthwise_conv2d_scipy = topi.testing.depthwise_conv2d_python_nchw(
+            depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nchw(
                 input_np, filter_np, stride, padding)
             relu_scipy = np.maximum(depthwise_conv2d_scipy, 0)
             return (_transform_data(input_np, ic_block),
 # under the License.
 import tvm
 from tvm import te
-import topi
+from tvm import topi
 import numpy as np
 from tvm.contrib.pickle_memoize import memoize
 from scipy import signal
-from topi.util import get_const_tuple
-from topi.nn.util import get_pad_tuple
-import topi.testing
-from topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_backward_input_nhwc
+from tvm.topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+import tvm.topi.testing
+from tvm.topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_backward_input_nhwc
 
 
 def verify_depthwise_conv2d_back_input(batch, in_channel, in_h, channel_multiplier, filter_h, stride_h, padding_h):
@@ -67,7 +67,7 @@ def verify_depthwise_conv2d_back_input(batch, in_channel, in_h, channel_multipli
         def get_ref_data():
             out_grad_np = np.random.uniform(size=out_grad_shape).astype(dtype)
             filter_np = np.random.uniform(size=filter_shape).astype(dtype)
-            dilated_out_grad_np = topi.testing.dilate_python(out_grad_np, [1, stride_h, stride_w, 1])
+            dilated_out_grad_np = tvm.topi.testing.dilate_python(out_grad_np, [1, stride_h, stride_w, 1])
             # padding params in forward propagation
             fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple([padding_h, padding_w], (filter_h, filter_w))
             # padding params in backward propagation
 # under the License.
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 import numpy as np
 from tvm.contrib.pickle_memoize import memoize
 from scipy import signal
-from topi.util import get_const_tuple
-from topi.nn.util import get_pad_tuple
-from topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_backward_weight_nhwc
+from tvm.topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_backward_weight_nhwc
 
 
 def verify_depthwise_conv2d_back_weight(batch, in_channel, in_h, channel_multiplier, filter_h, stride_h, padding_h):
@@ -67,7 +67,7 @@ def verify_depthwise_conv2d_back_weight(batch, in_channel, in_h, channel_multipl
         def get_ref_data():
             out_grad_np = np.random.uniform(size=out_grad_shape).astype(dtype)
             input_np = np.random.uniform(size=in_shape).astype(dtype)
-            dilated_out_grad_np = topi.testing.dilate_python(out_grad_np, [1, stride_h, stride_w, 1])
+            dilated_out_grad_np = tvm.topi.testing.dilate_python(out_grad_np, [1, stride_h, stride_w, 1])
 
             pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple([padding_h, padding_w], (filter_h, filter_w))
             padded_input_np = np.zeros((batch, in_h+pad_top+pad_bottom, in_w+pad_left+pad_right, in_channel))
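
get_pad_tuple, now under tvm.topi.nn.util, is what expands the two-element padding spec into the explicit per-side amounts used above; a small illustration for a 3x3 kernel:

    from tvm.topi.nn.util import get_pad_tuple

    # [pad_h, pad_w] plus the kernel size -> explicit per-side padding.
    pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple([1, 1], (3, 3))
    assert (pad_top, pad_left, pad_bottom, pad_right) == (1, 1, 1, 1)
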
similarity index 94%
rename from topi/tests/python/test_topi_dilate.py
rename to tests/python/topi/python/test_topi_dilate.py
index 1e69383..60f2083 100644 (file)
@@ -16,8 +16,8 @@
 # under the License.
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 import numpy as np
 
 
@@ -30,7 +30,7 @@ def test_dilate():
         Output = topi.nn.dilate(Input, strides)
         schedule = te.create_schedule(Output.op)
         input_np = np.random.uniform(size=input_size).astype(Input.dtype)
-        output_np = topi.testing.dilate_python(input_np, strides)
+        output_np = tvm.topi.testing.dilate_python(input_np, strides)
         input_tvm = tvm.nd.array(input_np, ctx=ctx)
         output_size = topi.util.get_const_tuple(Output.shape)
         output_tvm = tvm.nd.array(np.zeros(shape=output_size).astype(Output.dtype), ctx=ctx)
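
dilate_python, the NumPy reference checked here, inserts (stride - 1) zeros between adjacent elements along each axis; a tiny concrete example:

    import numpy as np
    import tvm.topi.testing

    x = np.array([[1.0, 2.0], [3.0, 4.0]], dtype="float32")
    y = tvm.topi.testing.dilate_python(x, (2, 2))
    # y is 3x3: originals land on even indices, zeros fill the gaps.
    assert y.shape == (3, 3)
    assert y[0, 0] == 1.0 and y[0, 1] == 0.0 and y[0, 2] == 2.0
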
@@ -21,10 +21,10 @@ import tvm
 from tvm import te
 from tvm import autotvm
 from tvm.autotvm.task.space import FallbackConfigEntity
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 
 from common import get_all_backend, Int8Fallback
 
@@ -56,8 +56,8 @@ def verify_group_conv2d_nchw(batch, in_channel, in_size, num_filter, kernel, str
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
-        c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(dtype)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(dtype)
 
         if add_bias:
             b_np = np.random.uniform(size=bias_shape).astype(dtype)
@@ -77,7 +77,7 @@ def verify_group_conv2d_nchw(batch, in_channel, in_size, num_filter, kernel, str
 
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _group_conv2d_nchw_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _group_conv2d_nchw_implement)
             C = fcompute(A, W, stride, padding, dilation, groups, dtype)
             if add_bias:
                 C = topi.add(C, bias)
@@ -128,8 +128,8 @@ def verify_group_conv2d_NCHWc_int8(batch, in_channel, in_size, num_filter, kerne
         a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
         w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
-        c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(dtype)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(dtype)
 
         # convert to NCHWc
         _, _, out_height, out_width = c_np.shape
@@ -21,10 +21,10 @@ import numpy as np
 import tvm
 from tvm import te
 from tvm import autotvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 import pytest
 
 from common import get_all_backend
@@ -69,7 +69,7 @@ def verify_group_conv2d_NCHWc_int8(batch, in_channel, groups, in_size, num_filte
     def get_ref_data():
         a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype("uint8")
         w_np = np.random.uniform(size=(num_filter, in_channel//groups, kernel, kernel)).astype("int8")
-        c_np = topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding, groups)
+        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding, groups)
         return _transform_data(a_np, ic_block), _transform_kernel(w_np, ic_block, oc_block), \
                _transform_data(c_np, oc_block)
 
similarity index 91%
rename from topi/tests/python/test_topi_image.py
rename to tests/python/topi/python/test_topi_image.py
index 012ed42..8d00929 100644 (file)
@@ -18,8 +18,8 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
 
 from common import get_all_backend
@@ -41,11 +41,11 @@ def verify_resize(batch, in_channel, in_height, in_width, out_height, out_width,
             'Layout not supported {} '.format(layout))
     B = topi.image.resize(A, (out_height, out_width), layout=layout, coordinate_transformation_mode=coord_trans, method=method)
     if method == "bilinear":
-        b_np = topi.testing.bilinear_resize_python(a_np, (out_height, out_width), layout, coord_trans)
+        b_np = tvm.topi.testing.bilinear_resize_python(a_np, (out_height, out_width), layout, coord_trans)
     else:
         scale_h = out_height / in_height
         scale_w = out_width / in_width
-        b_np = topi.testing.upsampling_python(a_np, (scale_h, scale_w), layout)
+        b_np = tvm.topi.testing.upsampling_python(a_np, (scale_h, scale_w), layout)
 
     def check_device(device):
         ctx = tvm.context(device, 0)
@@ -54,7 +54,7 @@ def verify_resize(batch, in_channel, in_height, in_width, out_height, out_width,
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         a = tvm.nd.array(a_np, ctx)
         b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx)
         f = tvm.build(s, [A, B], device)
@@ -103,13 +103,13 @@ def verify_resize3d(batch, in_channel, in_depth, in_height, in_width, out_depth,
                             coordinate_transformation_mode=coordinate_transformation_mode, method=method)
 
     if method == "trilinear":
-        b_np = topi.testing.trilinear_resize3d_python(a_np, (out_depth, out_height, out_width), layout,
+        b_np = tvm.topi.testing.trilinear_resize3d_python(a_np, (out_depth, out_height, out_width), layout,
                                                       coordinate_transformation_mode)
     else:
         scale_d = out_depth / in_depth
         scale_h = out_height / in_height
         scale_w = out_width / in_width
-        b_np = topi.testing.upsampling3d_python(a_np, (scale_d, scale_h, scale_w), layout)
+        b_np = tvm.topi.testing.upsampling3d_python(a_np, (scale_d, scale_h, scale_w), layout)
 
     def check_device(device):
         ctx = tvm.context(device, 0)
@@ -118,7 +118,7 @@ def verify_resize3d(batch, in_channel, in_depth, in_height, in_width, out_depth,
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         a = tvm.nd.array(a_np, ctx)
         b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx)
         f = tvm.build(s, [A, B], device)
@@ -168,7 +168,7 @@ def test_crop_and_resize():
         out = topi.image.crop_and_resize(images, boxes, box_ind, np_crop_size, layout=layout,
                                          method=method, extrapolation_value=extrapolation_value)
 
-        baseline_np = topi.testing.crop_and_resize_python(np_images, np_boxes, np_box_indices,
+        baseline_np = tvm.topi.testing.crop_and_resize_python(np_images, np_boxes, np_box_indices,
                                                           np_crop_size, layout, method,
                                                           extrapolation_value)
         def check_device(device):
@@ -178,7 +178,7 @@ def test_crop_and_resize():
                 return
             print("Running on target: %s" % device)
             with tvm.target.create(device):
-                s = topi.testing.get_injective_schedule(device)(out)
+                s = tvm.topi.testing.get_injective_schedule(device)(out)
             tvm_images = tvm.nd.array(np_images, ctx)
             tvm_boxes = tvm.nd.array(np_boxes, ctx)
             tvm_indices = tvm.nd.array(np_box_indices, ctx)
@@ -216,7 +216,7 @@ def test_affine_grid():
         @memoize("topi.tests.test_affine_grid.verify_affine_grid")
         def get_ref_data():
             data_np = np.random.uniform(size=data_shape).astype(dtype)
-            out_np = topi.testing.affine_grid_python(data_np, target_shape)
+            out_np = tvm.topi.testing.affine_grid_python(data_np, target_shape)
             return data_np, out_np
 
         data_np, out_np = get_ref_data()
@@ -228,7 +228,7 @@ def test_affine_grid():
                 return
             print("Running on target: %s" % device)
             with tvm.target.create(device):
-                s = topi.testing.get_injective_schedule(device)(out)
+                s = tvm.topi.testing.get_injective_schedule(device)(out)
             tvm_data = tvm.nd.array(data_np, ctx)
             tvm_out = tvm.nd.empty(out_np.shape, dtype, ctx)
             f = tvm.build(s, [data, out], device)
@@ -256,7 +256,7 @@ def test_grid_sample():
             data_np = np.random.uniform(size=data_shape).astype(dtype)
             # allow grid values to be out-of-bound
             grid_np = np.random.uniform(size=grid_shape, low=-1.5, high=1.5).astype(dtype)
-            out_np = topi.testing.grid_sample_nchw_python(data_np, grid_np, 'bilinear')
+            out_np = tvm.topi.testing.grid_sample_nchw_python(data_np, grid_np, 'bilinear')
             return data_np, grid_np, out_np
 
         data_np, grid_np, out_np = get_ref_data()
@@ -268,7 +268,7 @@ def test_grid_sample():
                 return
             print("Running on target: %s" % device)
             with tvm.target.create(device):
-                s = topi.testing.get_injective_schedule(device)(out)
+                s = tvm.topi.testing.get_injective_schedule(device)(out)
             tvm_data = tvm.nd.array(data_np, ctx)
             tvm_grid = tvm.nd.array(grid_np, ctx)
             tvm_out = tvm.nd.empty(out_np.shape, dtype, ctx)
similarity index 90%
rename from topi/tests/python/test_topi_lrn.py
rename to tests/python/topi/python/test_topi_lrn.py
index 7e003a7..2d57d07 100644 (file)
@@ -18,9 +18,9 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-from topi.util import get_const_tuple
-import topi.testing
+from tvm import topi
+from tvm.topi.util import get_const_tuple
+import tvm.topi.testing
 
 _lrn_schedule = {
     "generic": topi.generic.schedule_lrn,
@@ -38,7 +38,7 @@ def verify_lrn(shape, size, axis, bias, alpha, beta):
     dtype = A.dtype
 
     a_np = np.random.uniform(size=shape).astype(dtype)
-    b_np = topi.testing.lrn_python(a_np, size, axis, bias, alpha, beta)
+    b_np = tvm.topi.testing.lrn_python(a_np, size, axis, bias, alpha, beta)
 
     def check_device(device):
         if not tvm.runtime.enabled(device):
@@ -46,7 +46,7 @@ def verify_lrn(shape, size, axis, bias, alpha, beta):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s_func = topi.testing.dispatch(device, _lrn_schedule)
+            s_func = tvm.topi.testing.dispatch(device, _lrn_schedule)
             s = s_func([B])
         ctx = tvm.context(device, 0)
         a = tvm.nd.array(a_np, ctx)
similarity index 96%
rename from topi/tests/python/test_topi_math.py
rename to tests/python/topi/python/test_topi_math.py
index 6f1e858..8a9754e 100644 (file)
@@ -19,9 +19,9 @@ import scipy
 from scipy import special
 import tvm
 from tvm import te
-import topi
-import topi.testing
-from topi import util
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi import util
 from common import get_all_backend
 
 
@@ -64,7 +64,7 @@ def test_ewise():
                 return
             print("Running on target: %s" % device)
             with tvm.target.create(device):
-                s = topi.testing.get_injective_schedule(device)(B)
+                s = tvm.topi.testing.get_injective_schedule(device)(B)
             foo = tvm.build(s, [A, B], device, name=name)
             a = tvm.nd.array(a_np, ctx)
             b = tvm.nd.array(np.zeros_like(b_np), ctx)
@@ -104,7 +104,7 @@ def test_ewise():
                 return
             print("Running on target: %s" % device)
             with tvm.target.create(device):
-                s = topi.testing.get_injective_schedule(device)(B)
+                s = tvm.topi.testing.get_injective_schedule(device)(B)
             foo = tvm.build(s, [A, B], device, name="isnan")
             a = tvm.nd.array(a_np, ctx)
             b = tvm.nd.array(np.zeros_like(b_np), ctx)
@@ -134,7 +134,7 @@ def test_ewise():
                     print("Skip because %s is not enabled" % device)
                     return
                 with tvm.target.create(device):
-                    s = topi.testing.get_injective_schedule(device)(B)
+                    s = tvm.topi.testing.get_injective_schedule(device)(B)
                 foo = tvm.build(s, [A, B], device, name=name)
                 a = tvm.nd.array(a_np, ctx)
                 b = tvm.nd.array(np.zeros_like(b_np), ctx)
@@ -188,7 +188,7 @@ def test_cast():
                 continue
             print("Running on target: %s" % device)
             with tvm.target.create(device):
-                s = topi.testing.get_injective_schedule(device)(B)
+                s = tvm.topi.testing.get_injective_schedule(device)(B)
             foo = tvm.build(s, [A, B], device)
             a = tvm.nd.array(a_np, ctx)
             b = tvm.nd.empty(shape=shape, dtype=to_dtype, ctx=ctx)
similarity index 97%
rename from topi/tests/python/test_topi_matmul.py
rename to tests/python/topi/python/test_topi_matmul.py
index 0c0a365..4ffa29e 100644 (file)
@@ -17,8 +17,8 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-from topi.util import get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
 
 def with_tvm(lam, *args):
     """ Take numpy arrays as args, convert them to TVM tensors and call `lam`.
similarity index 95%
rename from topi/tests/python/test_topi_pooling.py
rename to tests/python/topi/python/test_topi_pooling.py
index 048de81..b24dd85 100644 (file)
@@ -20,9 +20,9 @@ import math
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
 from common import get_all_backend
 
 _pool_schedule = {
@@ -98,7 +98,7 @@ def verify_pool(n, ic, ih, kh, sh, padding, pool_type, ceil_mode, count_include_
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s_func = topi.testing.dispatch(device, _pool_schedule)
+            s_func = tvm.topi.testing.dispatch(device, _pool_schedule)
             s = s_func(B, layout)
 
         a = tvm.nd.array(a_np, ctx)
@@ -140,7 +140,7 @@ def verify_pool_grad(n, ic, ih, kh, sh, padding, pool_type, ceil_mode, count_inc
 
     a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype)
     out_grad_np = np.random.uniform(low=0.001, size=bshape).astype(dtype)
-    pool_grad_np = topi.testing.pool_grad_nchw(a_np, out_grad_np, pool_size=(kh, kw),
+    pool_grad_np = tvm.topi.testing.pool_grad_nchw(a_np, out_grad_np, pool_size=(kh, kw),
                                                strides=(sh, sw), padding=padding,
                                                pool_type=pool_type, ceil_mode=ceil_mode,
                                                count_include_pad=count_include_pad)
@@ -154,7 +154,7 @@ def verify_pool_grad(n, ic, ih, kh, sh, padding, pool_type, ceil_mode, count_inc
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s_func = topi.testing.dispatch(device, _pool_grad_schedule)
+            s_func = tvm.topi.testing.dispatch(device, _pool_grad_schedule)
             s = s_func(PoolGrad)
 
         a = tvm.nd.array(a_np, ctx)
@@ -229,7 +229,7 @@ def verify_global_pool(dshape, pool_type, layout='NCHW'):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s_func = topi.testing.dispatch(device, _adaptive_pool_schedule)
+            s_func = tvm.topi.testing.dispatch(device, _adaptive_pool_schedule)
             if device == "cuda":
                 s = s_func(B, layout)
             else:
@@ -258,7 +258,7 @@ def test_global_pool():
 def verify_adaptive_pool(dshape, out_size, pool_type, layout="NCHW", dtype="float32"):
     """verify function of adaptive_pool"""
     np_data = np.random.uniform(low=0, high=255, size=dshape).astype(dtype)
-    np_out = topi.testing.adaptive_pool(np_data, out_size, pool_type, layout)
+    np_out = tvm.topi.testing.adaptive_pool(np_data, out_size, pool_type, layout)
     oshape = np_out.shape
 
     data = te.placeholder(dshape, name="data", dtype=dtype)
@@ -275,7 +275,7 @@ def verify_adaptive_pool(dshape, out_size, pool_type, layout="NCHW", dtype="floa
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s_func = topi.testing.dispatch(device, _adaptive_pool_schedule)
+            s_func = tvm.topi.testing.dispatch(device, _adaptive_pool_schedule)
             if device == "cuda":
                 s = s_func(out, layout)
             else:
@@ -326,7 +326,7 @@ def verify_pool3d(n, ic, ih, kh, sh, padding, pool_type,
     output_shape = [int(i) for i in B.shape]
 
     input_np = np.random.uniform(low=0.001, size=input_shape).astype(dtype)
-    ref_np = topi.testing.pool3d_ncdhw_python(input_np, kernel, stride, padding,
+    ref_np = tvm.topi.testing.pool3d_ncdhw_python(input_np, kernel, stride, padding,
                                               output_shape, pool_type, count_include_pad, ceil_mode)
 
     def check_device(device):
@@ -336,7 +336,7 @@ def verify_pool3d(n, ic, ih, kh, sh, padding, pool_type,
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s_func = topi.testing.dispatch(device, _pool_schedule)
+            s_func = tvm.topi.testing.dispatch(device, _pool_schedule)
             s = s_func(B, layout)
 
         a = tvm.nd.array(input_np, ctx)
@@ -381,7 +381,7 @@ def verify_pool1d(n, ic, iw, kw, sw, padding, pool_type,
     output_shape = [int(i) for i in B.shape]
 
     input_np = np.random.uniform(low=0.001, size=input_shape).astype(dtype)
-    ref_np = topi.testing.pool1d_ncw_python(input_np, kernel, stride, padding,
+    ref_np = tvm.topi.testing.pool1d_ncw_python(input_np, kernel, stride, padding,
                                             output_shape, pool_type, count_include_pad, ceil_mode)
 
     def check_device(device):
@@ -391,7 +391,7 @@ def verify_pool1d(n, ic, iw, kw, sw, padding, pool_type,
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s_func = topi.testing.dispatch(device, _pool_schedule)
+            s_func = tvm.topi.testing.dispatch(device, _pool_schedule)
             s = s_func(B, layout)
 
         a = tvm.nd.array(input_np, ctx)
similarity index 98%
rename from topi/tests/python/test_topi_reduce.py
rename to tests/python/topi/python/test_topi_reduce.py
index cc84fe0..d84182f 100644 (file)
@@ -19,8 +19,8 @@ import os
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 
 from common import get_all_backend
 
@@ -76,7 +76,7 @@ def verify_reduce_map_ele(in_shape, axis, keepdims, type="sum", dtype="float32")
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_reduce_schedule(device)(B)
+            s = tvm.topi.testing.get_reduce_schedule(device)(B)
 
         foo = tvm.build(s, [A, B], device, name=type)
         # Test
similarity index 96%
rename from topi/tests/python/test_topi_relu.py
rename to tests/python/topi/python/test_topi_relu.py
index 4d4166f..1114b3f 100644 (file)
@@ -19,9 +19,9 @@ import os
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
 from tvm.contrib.nvcc import have_fp16
 
 from common import get_all_backend
@@ -43,7 +43,7 @@ def verify_relu(m, n, dtype="float32"):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_elemwise_schedule(device)(B)
+            s = tvm.topi.testing.get_elemwise_schedule(device)(B)
 
         a = tvm.nd.array(a_np, ctx)
         b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
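
get_const_tuple, used above to size the output buffer, converts a symbolic te shape into a plain tuple of Python ints; a minimal illustration:

    import numpy as np
    from tvm import te
    from tvm.topi.util import get_const_tuple

    A = te.placeholder((2, 3), name="A")
    shape = get_const_tuple(A.shape)   # IntImm dims -> (2, 3) as Python ints
    buf = np.zeros(shape, dtype=A.dtype)
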
similarity index 91%
rename from topi/tests/python/test_topi_reorg.py
rename to tests/python/topi/python/test_topi_reorg.py
index 09c2f2f..e5a1947 100644 (file)
 # under the License.
 """Example code to do reorg."""
 import numpy as np
-import topi
-from topi.util import get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
 import tvm
 from tvm import te
-import topi.testing
+import tvm.topi.testing
 
 _reorg_schedule = {
     "generic": topi.generic.schedule_reorg,
@@ -39,7 +39,7 @@ def verify_reorg(batch, in_size, in_channel, stride):
 
     def get_ref_data_reorg():
         a_np = np.random.uniform(size=a_shape).astype(dtype)
-        b_np = topi.testing.reorg_python(a_np, stride)
+        b_np = tvm.topi.testing.reorg_python(a_np, stride)
         return a_np, b_np
 
     a_np, b_np = get_ref_data_reorg()
@@ -52,7 +52,7 @@ def verify_reorg(batch, in_size, in_channel, stride):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s_func = topi.testing.dispatch(device, _reorg_schedule)
+            s_func = tvm.topi.testing.dispatch(device, _reorg_schedule)
             s = s_func([B])
         a = tvm.nd.array(a_np, ctx)
         b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
similarity index 90%
rename from topi/tests/python/test_topi_softmax.py
rename to tests/python/topi/python/test_topi_softmax.py
index e213074..1ff69be 100644 (file)
@@ -19,10 +19,10 @@ import os
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 import logging
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 
 from common import get_all_backend
 
@@ -40,7 +40,7 @@ def check_device(A, B, a_np, b_np, device, name):
         return
     print("Running on target: %s" % device)
     with tvm.target.create(device):
-        s_func = topi.testing.dispatch(device, _softmax_schedule)
+        s_func = tvm.topi.testing.dispatch(device, _softmax_schedule)
         s = s_func(B)
 
     a = tvm.nd.array(a_np, ctx)
@@ -57,7 +57,7 @@ def verify_softmax(m, n, dtype="float32"):
     tvm.lower(s, [A, B], simple_mode=True)
 
     a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
-    b_np = topi.testing.softmax_python(a_np)
+    b_np = tvm.topi.testing.softmax_python(a_np)
 
     for device in get_all_backend():
         check_device(A, B, a_np, b_np, device, "softmax")
@@ -68,7 +68,7 @@ def verify_softmax_4d(shape, dtype="float32"):
 
     _, c, h, w = shape
     a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
-    b_np = topi.testing.softmax_python(a_np.transpose(0, 2, 3, 1).reshape(h*w, c))
+    b_np = tvm.topi.testing.softmax_python(a_np.transpose(0, 2, 3, 1).reshape(h*w, c))
     b_np = b_np.reshape(1, h, w, c).transpose(0, 3, 1, 2)
 
     for device in get_all_backend():
@@ -87,7 +87,7 @@ def verify_log_softmax(m, n, dtype="float32"):
     s = te.create_schedule([B.op])
     tvm.lower(s, [A, B], simple_mode=True)
     a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
-    b_np = topi.testing.log_softmax_python(a_np)
+    b_np = tvm.topi.testing.log_softmax_python(a_np)
 
     for device in get_all_backend():
         check_device(A, B, a_np, b_np, device, "log_softmax")
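
softmax_python and log_softmax_python are plain NumPy references; a quick sanity sketch of the former (the 3x5 shape is arbitrary):

    import numpy as np
    import tvm.topi.testing

    a_np = np.random.uniform(size=(3, 5)).astype("float32")
    b_np = tvm.topi.testing.softmax_python(a_np)
    # Each row of the reference output is a probability distribution.
    assert np.allclose(b_np.sum(axis=1), 1.0, atol=1e-5)
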
similarity index 95%
rename from topi/tests/python/test_topi_sort.py
rename to tests/python/topi/python/test_topi_sort.py
index 2728733..7abfe58 100644 (file)
@@ -19,8 +19,8 @@ from __future__ import print_function
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 
 _argsort_implement = {
     "generic": (topi.argsort, topi.generic.schedule_argsort),
@@ -58,7 +58,7 @@ def verify_argsort(axis, is_ascend):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _argsort_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _argsort_implement)
             out = fcompute(data, axis=axis, is_ascend=is_ascend)
             s = fschedule(out)
 
@@ -102,7 +102,7 @@ def verify_topk(k, axis, ret_type, is_ascend, dtype):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _topk_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _topk_implement)
             outs = fcompute(data, k, axis, ret_type, is_ascend, dtype)
             outs = outs if isinstance(outs, list) else [outs]
             s = fschedule(outs)
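
The dispatch helper seen in both hunks above resolves a target string against a table of per-target implementations, falling back to the "generic" entry; a minimal sketch reusing the argsort entries shown earlier in this file:

    from tvm import topi
    import tvm.topi.testing

    _argsort_implement = {
        "generic": (topi.argsort, topi.generic.schedule_argsort),
    }
    # "llvm" has no entry of its own, so dispatch falls back to "generic".
    fcompute, fschedule = tvm.topi.testing.dispatch("llvm", _argsort_implement)
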
@@ -18,8 +18,8 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 
 from common import get_all_backend
 
@@ -45,7 +45,7 @@ def verify_space_to_depth(block_size, batch, in_channel, in_height, in_width, la
     B = topi.nn.space_to_depth(A, block_size=block_size, layout=layout)
     if layout == 'NHWC':
         a_np = np.transpose(a_np, axes=[0, 3, 1, 2])
-    b_np = topi.testing.space_to_depth_python(a_np, block_size)
+    b_np = tvm.topi.testing.space_to_depth_python(a_np, block_size)
     if layout == 'NHWC':
         a_np = np.transpose(a_np, axes=[0, 2, 3, 1])
         b_np = np.transpose(b_np, axes=[0, 2, 3, 1])
@@ -57,7 +57,7 @@ def verify_space_to_depth(block_size, batch, in_channel, in_height, in_width, la
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         a = tvm.nd.array(a_np, ctx)
         b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx)
         f = tvm.build(s, [A, B], device)
similarity index 98%
rename from topi/tests/python/test_topi_sparse.py
rename to tests/python/topi/python/test_topi_sparse.py
index 748181d..e5fd0e9 100644 (file)
@@ -18,9 +18,9 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
 import tvm.contrib.sparse as tvmsp
 from collections import namedtuple
 import time
@@ -307,7 +307,7 @@ def verify_sparse_dense_bsr(M, N, K, BS_R, BS_C, density, use_relu):
             print("Skip because %s is not enabled" % device)
             return
         print("Running on target: %s" % device)
-        fcompute, fschedule = topi.testing.dispatch(device, _sparse_dense_implement)
+        fcompute, fschedule = tvm.topi.testing.dispatch(device, _sparse_dense_implement)
         with tvm.target.create(device):
             Y = fcompute(X, W_data, W_indices, W_indptr)
             if use_relu:
@@ -355,7 +355,7 @@ def test_sparse_dense_bsr_randomized():
                 print("Skip because %s is not enabled" % device)
                 return
             print("Running on target: %s" % device)
-            fcompute, fschedule = topi.testing.dispatch(device, _sparse_dense_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _sparse_dense_implement)
             with tvm.target.create(device):
                 Y = fcompute(X, W_data, W_indices, W_indptr)
                 s = fschedule([Y])
similarity index 97%
rename from topi/tests/python/test_topi_tensor.py
rename to tests/python/topi/python/test_topi_tensor.py
index 68ea7ab..3444284 100644 (file)
@@ -18,8 +18,8 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
 from tvm.contrib.nvcc import have_fp16
 
@@ -100,7 +100,7 @@ def verify_vectorization(n, m, dtype):
             A = te.placeholder((n, m), name='A', dtype=dtype)
             B = te.compute((n, m), lambda i, j:
                              A[i, j] + tvm.tir.const(1, A.dtype), name='B')
-            S = topi.testing.get_elemwise_schedule(device)(B)
+            S = tvm.topi.testing.get_elemwise_schedule(device)(B)
 
             fun = tvm.build(S, [A, B], device)
             np_A = tvm.nd.empty((n, m), A.dtype, ctx).copyfrom(
similarity index 94%
rename from topi/tests/python/test_topi_transform.py
rename to tests/python/topi/python/test_topi_transform.py
index ee7f114..13d24d5 100644 (file)
@@ -19,8 +19,8 @@ import numpy as np
 import pytest
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.nvcc import have_fp16
 
 from common import get_all_backend
@@ -35,7 +35,7 @@ def verify_expand_dims(in_shape, out_shape, axis, num_newaxis):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_broadcast_schedule(device)(B)
+            s = tvm.topi.testing.get_broadcast_schedule(device)(B)
         foo = tvm.build(s, [A, B], device, name="expand_dims")
         data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
         out_npy = data_npy.reshape(out_shape)
@@ -61,7 +61,7 @@ def verify_reinterpret(in_shape, in_dtype, out_dtype, generator):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_elemwise_schedule(device)(B)
+            s = tvm.topi.testing.get_elemwise_schedule(device)(B)
         foo = tvm.build(s, [A, B], device, name="reinterpret")
         data_npy = generator(in_shape).astype(in_dtype)
         out_npy = data_npy.view(B.dtype)
@@ -84,7 +84,7 @@ def verify_transpose(in_shape, axes):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         foo = tvm.build(s, [A, B], device, name="transpose")
         data_npy = np.arange(np.prod(in_shape)).reshape(in_shape).astype(A.dtype)
         out_npy = data_npy.transpose(axes)
@@ -107,7 +107,7 @@ def verify_reshape(src_shape, dst_shape):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         foo = tvm.build(s, [A, B], device, name="reshape")
         data_npy = np.random.normal(size=src_shape).astype(A.dtype)
         out_npy = np.reshape(data_npy, newshape=dst_shape)
@@ -130,7 +130,7 @@ def verify_squeeze(src_shape, axis):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
 
         foo = tvm.build(s, [A, B], device, name="squeeze")
         data_npy = np.random.normal(size=src_shape).astype(A.dtype)
@@ -156,7 +156,7 @@ def verify_concatenate(shapes, axis):
         for key in target.keys:
             if key in schedule_map:
                 return schedule_map[key]
-        return topi.testing.get_injective_schedule(target)
+        return tvm.topi.testing.get_injective_schedule(target)
 
     tensor_l = []
     for i, shape in enumerate(shapes):
@@ -194,7 +194,7 @@ def verify_stack(shapes, axis):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_broadcast_schedule(device)(out_tensor)
+            s = tvm.topi.testing.get_broadcast_schedule(device)(out_tensor)
 
         foo = tvm.build(s, tensor_l + [out_tensor], device, name="stack")
         data_npys = [np.random.normal(size=shape).astype(tensor_l[0].dtype) for shape in shapes]
@@ -218,7 +218,7 @@ def verify_split(src_shape, indices_or_sections, axis):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(tensor_l)
+            s = tvm.topi.testing.get_injective_schedule(device)(tensor_l)
 
         foo = tvm.build(s, [A] + list(tensor_l), device, name="split")
         data_npy = np.random.normal(size=src_shape).astype(A.dtype)
@@ -277,7 +277,7 @@ def verify_flip(in_shape, axis):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
 
         foo = tvm.build(s, [A, B], device, name="reverse")
         x_np = np.random.uniform(size=in_shape).astype(A.dtype)
@@ -305,7 +305,7 @@ def test_reverse_sequence():
                 return
             print("Running on target: %s" % device)
             with tvm.target.create(device):
-                s = topi.testing.get_injective_schedule(device)(C)
+                s = tvm.topi.testing.get_injective_schedule(device)(C)
 
             foo = tvm.build(s, [A, B, C], device, name="reverse_sequence")
 
@@ -387,7 +387,7 @@ def verify_take(src_shape, indices_src, axis=None, mode="clip"):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(out_tensor)
+            s = tvm.topi.testing.get_injective_schedule(device)(out_tensor)
 
         foo = tvm.build(s, [A] + [indices] + [out_tensor] , device, name="take")
         shape_size = 1
@@ -422,11 +422,11 @@ def verify_strided_slice(in_shape, begin, end, strides=None):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
 
         foo = tvm.build(s, [A, B], device, name="stride_slice")
         x_np = np.random.uniform(size=in_shape).astype(A.dtype)
-        out_npy = topi.testing.strided_slice_python(
+        out_npy = tvm.topi.testing.strided_slice_python(
             x_np, begin, end, strides) + 1
         data_nd = tvm.nd.array(x_np, ctx)
         out_nd = tvm.nd.empty(out_npy.shape, ctx=ctx, dtype=A.dtype)
@@ -454,7 +454,7 @@ def verify_strided_set(in_shape, v_shape, begin, end, strides=None):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
 
         if strides is not None:
             foo = tvm.build(s, [A, V, b, e, st, B], device, name="stride_set")
@@ -466,7 +466,7 @@ def verify_strided_set(in_shape, v_shape, begin, end, strides=None):
         v_np = np.random.uniform(size=v_shape).astype(V.dtype)
         b_np = np.asarray(begin).astype('int32')
         e_np = np.asarray(end).astype('int32')
-        out_npy = topi.testing.strided_set_python(
+        out_npy = tvm.topi.testing.strided_set_python(
             x_np, v_np, begin, end, strides) + 1
         data_nd = tvm.nd.array(x_np, ctx)
         v_nd = tvm.nd.array(v_np, ctx)
@@ -497,10 +497,10 @@ def verify_gather(data, axis, indices):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(out_tensor)
+            s = tvm.topi.testing.get_injective_schedule(device)(out_tensor)
 
         func = tvm.build(s, [var_data, var_indices, out_tensor] , device, name="gather")
-        out_npys = topi.testing.gather_python(data, axis, indices)
+        out_npys = tvm.topi.testing.gather_python(data, axis, indices)
 
         data_nd = tvm.nd.array(data, ctx)
         indices_nd = tvm.nd.array(indices, ctx)
@@ -525,14 +525,14 @@ def verify_gather_nd(src_shape, indices_src, indices_dtype):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(out_tensor)
+            s = tvm.topi.testing.get_injective_schedule(device)(out_tensor)
 
         func = tvm.build(s, [A, indices, out_tensor] , device, name="take")
         shape_size = 1
         for i in range(len(src_shape)):
             shape_size = shape_size * src_shape[i]
         data_npy = np.arange(shape_size, dtype=src_dtype).reshape((src_shape))
-        out_npys = topi.testing.gather_nd_python(data_npy, indices_src)
+        out_npys = tvm.topi.testing.gather_nd_python(data_npy, indices_src)
 
         data_nd = tvm.nd.array(data_npy, ctx)
         indices_nd = tvm.nd.array(indices_src, ctx)
@@ -564,7 +564,7 @@ def verify_arange(start, stop, step):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(A)
+            s = tvm.topi.testing.get_injective_schedule(device)(A)
         f = tvm.build(s, [A], device, name="arange")
         a_nd = tvm.nd.empty(a_np.shape, dtype='float32', ctx=ctx)
         f(a_nd)
@@ -583,7 +583,7 @@ def verify_repeat(in_shape, repeats, axis):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_broadcast_schedule(device)(B)
+            s = tvm.topi.testing.get_broadcast_schedule(device)(B)
         foo = tvm.build(s, [A, B], device, name="repeat")
         data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
         out_npy = np.repeat(data_npy, repeats, axis)
@@ -605,7 +605,7 @@ def verify_tile(in_shape, reps):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_broadcast_schedule(device)(B)
+            s = tvm.topi.testing.get_broadcast_schedule(device)(B)
         foo = tvm.build(s, [A, B], device, name="tile")
         data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
         out_npy = np.tile(data_npy, reps)
@@ -630,7 +630,7 @@ def verify_where(in_shape):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_broadcast_schedule(device)(C)
+            s = tvm.topi.testing.get_broadcast_schedule(device)(C)
         f = tvm.build(s, [Cond, A, B, C], device, name="where")
         cond_npy = np.random.uniform(low=-1, high=1, size=in_shape).astype(dtype)
         x_npy = np.random.uniform(size=in_shape).astype(dtype)
@@ -658,10 +658,10 @@ def verify_one_hot(indices_shape, depth, on_value, off_value, axis, dtype):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(one_hot_result)
+            s = tvm.topi.testing.get_injective_schedule(device)(one_hot_result)
         fn = tvm.build(s, [indices, one_hot_result], device, name="one_hot")
         indices_npy = np.random.randint(0, depth, size=indices_shape).astype(indices.dtype)
-        out_npy = topi.testing.one_hot(indices_npy, on_value, off_value, depth, axis, dtype)
+        out_npy = tvm.topi.testing.one_hot(indices_npy, on_value, off_value, depth, axis, dtype)
         indices_nd = tvm.nd.array(indices_npy, ctx)
         out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(one_hot_result.dtype), ctx)
         fn(indices_nd, out_nd)
@@ -691,7 +691,7 @@ def verify_unravel_index(indices, shape, dtype):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(Z)
+            s = tvm.topi.testing.get_injective_schedule(device)(Z)
         foo = tvm.build(s, [X, Y, Z], device, name="unravel_index")
 
         out_npy = np.unravel_index(x_data, y_data)
@@ -727,7 +727,7 @@ def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(D)
+            s = tvm.topi.testing.get_injective_schedule(device)(D)
 
         foo = tvm.build(s, args + [D], device, name="sparse_to_dense")
 
@@ -816,7 +816,7 @@ def test_squeeze():
         ctx = tvm.context(device, 0)
         if ctx.exist:
             with tvm.target.create(device):
-                s = topi.testing.get_injective_schedule(device)(C)
+                s = tvm.topi.testing.get_injective_schedule(device)(C)
                 func = tvm.build(s, [A, C])
             a = tvm.nd.array(np.array((1, 2)).astype('float32'), ctx=ctx)
             c = tvm.nd.empty((1,), dtype='float32', ctx=ctx)
@@ -948,7 +948,7 @@ def test_layout_transform():
         tvm_output = tvm.nd.empty(output.shape, ctx=ctx, dtype=B.dtype)
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         f = tvm.build(s, [A, B], device, name="layout_transform")
         f(tvm_input, tvm_output)
         tvm.testing.assert_allclose(tvm_output.asnumpy(), output)
@@ -975,7 +975,7 @@ def test_shape():
         tvm_output = tvm.nd.empty(output.shape, ctx=ctx, dtype=dtype)
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         f = tvm.build(s, [A, B], device, name="shape")
         f(tvm_input, tvm_output)
         tvm.testing.assert_allclose(tvm_output.asnumpy(), output)
@@ -995,7 +995,7 @@ def test_sequence_mask():
                 C = topi.sequence_mask(A, B, axis=axis, mask_value=mask_value)
                 A_data = np.random.normal(0, 1, in_shape).astype(np.float32)
                 B_data = np.random.randint(1, max_length, (batch_size,)).astype(np.int32)
-                C_gt_data = topi.testing.sequence_mask(A_data, B_data, mask_value, axis)
+                C_gt_data = tvm.topi.testing.sequence_mask(A_data, B_data, mask_value, axis)
 
                 def check_device(device):
                     ctx = tvm.context(device, 0)
@@ -1007,7 +1007,7 @@ def test_sequence_mask():
                     tvm_C = tvm.nd.empty(in_shape, ctx=ctx, dtype="float32")
                     print("Running on target: %s" % device)
                     with tvm.target.create(device):
-                        s = topi.testing.get_injective_schedule(device)(C)
+                        s = tvm.topi.testing.get_injective_schedule(device)(C)
                     f = tvm.build(s, [A, B, C], device, name="SequenceMask")
                     f(tvm_A, tvm_B, tvm_C)
                     tvm.testing.assert_allclose(tvm_C.asnumpy(), C_gt_data)
@@ -1032,7 +1032,7 @@ def test_ndarray_size():
         tvm_output = tvm.nd.empty((), ctx=ctx, dtype=B.dtype)
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         f = tvm.build(s, [A, B], device, name="ndarray_size")
         f(tvm_input, tvm_output)
         tvm.testing.assert_allclose(tvm_output.asnumpy(), output)
@@ -1050,7 +1050,7 @@ def test_where_fusion():
                 print("Skip because %s is not enabled" % device)
                 return
             print("Running on target: %s" % device)
-            conv2d_compute, conv2d_schedule = topi.testing.get_conv2d_nchw_implement(device)
+            conv2d_compute, conv2d_schedule = tvm.topi.testing.get_conv2d_nchw_implement(device)
             data = te.placeholder((2, 1, 2, 4), 'int8', 'data')
             w = te.placeholder((3, 1, 2, 2), 'int8', 'w')
             conv1 = conv2d_compute(data, w, 1, 0, 1, 'int32')
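
Note: the hunks above apply one mechanical rewrite across these tests: the
standalone "import topi" / "import topi.testing" pair becomes an import through
the tvm namespace, and every topi.testing.* call site gains the tvm. prefix.
A minimal sketch of the new idiom; the relu placeholder below is illustrative,
not taken from this PR:

    import tvm
    from tvm import te, topi       # replaces: import topi
    import tvm.topi.testing        # replaces: import topi.testing

    A = te.placeholder((4, 4), name="A")
    B = topi.nn.relu(A)            # operator names are unchanged; only the package moved
    with tvm.target.create("llvm"):
        s = tvm.topi.testing.get_injective_schedule("llvm")(B)
    f = tvm.build(s, [A, B], "llvm")
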
similarity index 93%
rename from topi/tests/python/test_topi_upsampling.py
rename to tests/python/topi/python/test_topi_upsampling.py
index 874471b..04cc310 100644 (file)
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 import math
-from topi.util import nchw_pack_layout
+from tvm.topi.util import nchw_pack_layout
 
 from common import get_all_backend
 
@@ -54,9 +54,9 @@ def verify_upsampling(batch, in_channel, in_height, in_width, scale_h, scale_w,
 
     if method == "bilinear":
         out_size = (int(round(in_height*scale_h)), int(round(in_width*scale_w)))
-        b_np = topi.testing.bilinear_resize_python(a_np, out_size, layout, "asymmetric")
+        b_np = tvm.topi.testing.bilinear_resize_python(a_np, out_size, layout, "asymmetric")
     else:
-        b_np = topi.testing.upsampling_python(a_np, (scale_h, scale_w), layout)
+        b_np = tvm.topi.testing.upsampling_python(a_np, (scale_h, scale_w), layout)
 
     def check_device(device):
         ctx = tvm.context(device, 0)
@@ -65,7 +65,7 @@ def verify_upsampling(batch, in_channel, in_height, in_width, scale_h, scale_w,
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         a = tvm.nd.array(a_np, ctx)
         b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx)
         f = tvm.build(s, [A, B], device)
@@ -136,10 +136,10 @@ def verify_upsampling3d(batch, in_channel, in_depth, in_height, in_width, scale_
 
     if method == "trilinear":
         out_size = (int(round(in_depth*scale_d)), int(round(in_height*scale_h)), int(round(in_width*scale_w)))
-        b_np = topi.testing.trilinear_resize3d_python(a_np, out_size, layout,
-                                                      coordinate_transformation_mode="half_pixel")
+        b_np = tvm.topi.testing.trilinear_resize3d_python(a_np, out_size, layout,
+                                                          coordinate_transformation_mode="half_pixel")
     else:
-        b_np = topi.testing.upsampling3d_python(a_np, (scale_d, scale_h, scale_w), layout)
+        b_np = tvm.topi.testing.upsampling3d_python(a_np, (scale_d, scale_h, scale_w), layout)
 
     def check_device(device):
         ctx = tvm.context(device, 0)
@@ -148,7 +148,7 @@ def verify_upsampling3d(batch, in_channel, in_depth, in_height, in_width, scale_
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         a = tvm.nd.array(a_np, ctx)
         b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx)
         f = tvm.build(s, [A, B], device)
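
Note: the numpy reference helpers used by these upsampling tests moved with the
package and now live under tvm.topi.testing. A hedged sketch with an
illustrative NCHW input (the shapes are not from this PR):

    import numpy as np
    import tvm.topi.testing

    a_np = np.random.uniform(size=(1, 3, 8, 8)).astype("float32")
    b_np = tvm.topi.testing.upsampling_python(a_np, (2.0, 2.0), "NCHW")
    print(b_np.shape)   # (1, 3, 16, 16) for nearest-neighbor 2x upsampling
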
similarity index 97%
rename from topi/tests/python/test_topi_util.py
rename to tests/python/topi/python/test_topi_util.py
index 534b699..345e7f9 100644 (file)
@@ -16,7 +16,7 @@
 # under the License.
 """Test code for util"""
 
-import topi
+from tvm import topi
 
 
 def verify_get_shape(src_shape, src_layout, dst_layout, expect_shape):
similarity index 95%
rename from topi/tests/python/test_topi_vision.py
rename to tests/python/topi/python/test_topi_vision.py
index b74e193..e0e2205 100644 (file)
@@ -20,12 +20,12 @@ import math
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
-from topi.vision import ssd, non_max_suppression, get_valid_counts
+from tvm.topi.util import get_const_tuple
+from tvm.topi.vision import ssd, non_max_suppression, get_valid_counts
 
 _get_valid_counts_implement = {
     "generic": (topi.vision.get_valid_counts, topi.generic.schedule_get_valid_counts),
@@ -93,7 +93,7 @@ def verify_get_valid_counts(dshape, score_threshold, id_index, score_index):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _get_valid_counts_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _get_valid_counts_implement)
             data = te.placeholder(dshape, name="data", dtype=dtype)
             outs = fcompute(data, score_threshold, id_index, score_index)
             s = fschedule(outs)
@@ -148,7 +148,7 @@ def verify_non_max_suppression(np_data, np_valid_count, np_indices, np_result, n
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _nms_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _nms_implement)
             out = fcompute(data, valid_count, indices, max_output_size, iou_threshold, force_suppress,
                            top_k, coord_start=coord_start, score_index=score_index, id_index=id_index,
                            return_indices=False)
@@ -252,7 +252,7 @@ def verify_multibox_prior(dshape, sizes=(1,), ratios=(1,), steps=(-1, -1), offse
             return
         print("Running on target: %s" % device)
 
-        fcompute, fschedule = topi.testing.dispatch(device, _multibox_prior_implement)
+        fcompute, fschedule = tvm.topi.testing.dispatch(device, _multibox_prior_implement)
         with tvm.target.create(device):
             out = fcompute(data, sizes, ratios, steps, offsets, clip)
             s = fschedule(out)
@@ -297,7 +297,7 @@ def test_multibox_detection():
             return
         print("Running on target: %s" % device)
 
-        fcompute, fschedule = topi.testing.dispatch(device, _multibox_detection_implement)
+        fcompute, fschedule = tvm.topi.testing.dispatch(device, _multibox_detection_implement)
         with tvm.target.create(device):
             out = fcompute(cls_prob, loc_preds, anchors)
             s = fschedule(out)
@@ -326,7 +326,7 @@ def verify_roi_align(batch, in_channel, in_size, num_roi, pooled_size, spatial_s
         a_np = np.random.uniform(size=a_shape).astype('float32')
         rois_np = np.random.uniform(size=rois_shape).astype('float32') * in_size
         rois_np[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi)
-        b_np = topi.testing.roi_align_nchw_python(a_np, rois_np, pooled_size=pooled_size,
-                                                  spatial_scale=spatial_scale,
-                                                  sample_ratio=sample_ratio)
+        b_np = tvm.topi.testing.roi_align_nchw_python(a_np, rois_np, pooled_size=pooled_size,
+                                                      spatial_scale=spatial_scale,
+                                                      sample_ratio=sample_ratio)
 
@@ -342,7 +342,7 @@ def verify_roi_align(batch, in_channel, in_size, num_roi, pooled_size, spatial_s
         print("Running on target: %s" % device)
 
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _roi_align_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _roi_align_implement)
             b = fcompute(a, rois, pooled_size=pooled_size,
                          spatial_scale=spatial_scale,
                          sample_ratio=sample_ratio)
@@ -379,7 +379,7 @@ def verify_roi_pool(batch, in_channel, in_size, num_roi, pooled_size, spatial_sc
         rois_np = np.random.uniform(size=rois_shape).astype('float32') * in_size
         rois_np[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi).astype('float32')
 
-        b_np = topi.testing.roi_pool_nchw_python(a_np, rois_np, pooled_size=pooled_size,
-                                                 spatial_scale=spatial_scale)
+        b_np = tvm.topi.testing.roi_pool_nchw_python(a_np, rois_np, pooled_size=pooled_size,
+                                                     spatial_scale=spatial_scale)
         return a_np, rois_np, b_np
 
@@ -395,7 +395,7 @@ def verify_roi_pool(batch, in_channel, in_size, num_roi, pooled_size, spatial_sc
         with tvm.target.create(device):
             b = topi.vision.rcnn.roi_pool_nchw(a, rois, pooled_size=pooled_size,
                                                 spatial_scale=spatial_scale)
-            s_func = topi.testing.dispatch(device, _roi_pool_schedule)
+            s_func = tvm.topi.testing.dispatch(device, _roi_pool_schedule)
             s = s_func(b)
 
         tvm_a = tvm.nd.array(a_np, ctx)
@@ -426,7 +426,7 @@ def verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _proposal_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _proposal_implement)
             out = fcompute(cls_prob, bbox_pred, im_info, **attrs)
             s = fschedule(out)
             f = tvm.build(s, [cls_prob, bbox_pred, im_info, out], device)
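
Note: the vision tests keep their per-target implement tables. The table values
still name topi.* symbols, which now resolve through "from tvm import topi",
and the lookup goes through tvm.topi.testing.dispatch. A sketch of the pattern,
assuming dispatch falls back to the "generic" entry for a plain llvm target:

    from tvm import topi
    import tvm.topi.testing

    _get_valid_counts_implement = {
        "generic": (topi.vision.get_valid_counts,
                    topi.generic.schedule_get_valid_counts),
    }
    fcompute, fschedule = tvm.topi.testing.dispatch("llvm", _get_valid_counts_implement)
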
index fa22fdc..b67178e 100644 (file)
@@ -20,7 +20,7 @@
 import threading
 
 from tvm import te, auto_scheduler
-import topi
+from tvm import topi
 
 
 @auto_scheduler.register_workload
index d9c24b9..2530d55 100644 (file)
@@ -17,7 +17,8 @@
 
 """Test ComputeDAG (replay, infer bound)"""
 
-import tvm, topi
+import tvm
+from tvm import topi
 from tvm import auto_scheduler, te
 
 from test_auto_scheduler_common import get_tiled_matmul, matmul_auto_scheduler_test
index 5c501ac..a051e81 100644 (file)
@@ -21,7 +21,7 @@ import numpy as np
 
 import tvm
 from tvm import auto_scheduler, te
-import topi
+from tvm import topi
 
 from test_auto_scheduler_common import matmul_auto_scheduler_test, conv2d_nchw_bn_relu
 
index e65f191..9282667 100644 (file)
@@ -18,7 +18,7 @@
 """ Test measurement and log serialization. """
 
 import tvm
-import topi
+from tvm import topi
 from tvm import te, auto_scheduler
 import tempfile
 
index 343b867..58174dd 100644 (file)
@@ -22,7 +22,7 @@ import numpy as np
 import tvm
 from tvm import te
 from tvm.contrib import graph_runtime, util
-import topi
+from tvm import topi
 
 def get_simplex_graph(host_dev_type, device_dev_type):
     r""" Return the hand-crafted json object where only one copy node is
index 4cd08d0..7fdd259 100644 (file)
@@ -18,7 +18,7 @@
 import tvm
 from tvm import te
 import numpy as np
-import topi
+from tvm import topi
 import unittest
 from tvm.contrib.nvcc import have_fp16, have_int8
 from tvm.contrib import nvcc
@@ -881,14 +881,14 @@ def test_unrolled_vectorization():
 
     dtype = 'float32'
     target = 'cuda'
-    
+
     ## Compute declaration
     N = 128
     A = te.placeholder((N, N), name='A')
     B = te.placeholder((N, N), name='B')
     k = te.reduce_axis((0, N), name='k')
     C = te.compute((N, N), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name='C')
-    
+
     ## Schedule
     s = te.create_schedule([C.op])
     CC = s.cache_write(C, "local")
@@ -903,7 +903,7 @@ def test_unrolled_vectorization():
     ko, ki = s[CC].split(k, 2)
     s[CC].unroll(ki)
     s[CC].vectorize(j)
-    
+
     ## Check correctness
     ctx = tvm.context(target)
     a_tvm = tvm.nd.array(np.ones((N, N)).astype(dtype), ctx=ctx)
index cf82504..519d187 100644 (file)
@@ -16,7 +16,7 @@
 # under the License.
 import tvm
 from tvm import te
-import topi
+from tvm import topi
 from tvm.contrib import util, clang
 import numpy as np
 import ctypes
index f6723e2..eb48d83 100644 (file)
@@ -18,7 +18,7 @@
 import tvm
 from tvm import te
 from ctypes import *
-import topi
+from tvm import topi
 import numpy as np
 
 tgt = "llvm"
index c756de0..5415874 100644 (file)
@@ -18,8 +18,8 @@
 import tvm
 from tvm import te
 from tvm.testing import check_numerical_grads, assert_allclose
-import topi
-from topi.util import get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
 
 import numpy as np
 
index 977dfc3..1f17914 100644 (file)
@@ -16,7 +16,7 @@
 # under the License.
 import tvm
 from tvm import te
-import topi
+from tvm import topi
 import numpy as np
 from tvm.contrib import nvcc
 
index c6196d8..aa87665 100644 (file)
@@ -17,7 +17,7 @@
 import tvm
 from tvm import te
 import numpy as np
-from topi.testing import conv2d_nhwc_python
+from tvm.topi.testing import conv2d_nhwc_python
 from tvm.contrib import nvcc
 
 VERIFY = True
index 662eff0..3d22c0f 100644 (file)
@@ -17,7 +17,7 @@
 import tvm
 import numpy as np
 from tvm import te
-from topi.nn.pooling import pool
+from tvm.topi.nn.pooling import pool
 
 def test_tensor():
     m = te.size_var('m')
index 2e46962..9714368 100644 (file)
@@ -17,9 +17,9 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
 
 
 def test_operator_type_and_tags():
@@ -109,7 +109,7 @@ def verify_tensor_scalar_bop(shape, typ="add"):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_elemwise_schedule(device)(B)
+            s = tvm.topi.testing.get_elemwise_schedule(device)(B)
 
         k_ = 2
         foo = tvm.build(s, [A, B, k] + sh, device, name="tensor_scalar_" + typ)
@@ -155,7 +155,7 @@ def verify_broadcast_bop(lhs_shape, rhs_shape, typ="add"):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_broadcast_schedule(device)(C)
+            s = tvm.topi.testing.get_broadcast_schedule(device)(C)
 
         foo = tvm.build(s, [A, B, C], device, name="broadcast_binary" + "_" + typ)
         lhs_npy = np.random.uniform(size=lhs_shape).astype(A.dtype)
@@ -191,7 +191,7 @@ def verify_conv2d_scalar_bop(batch, in_size, in_channel, num_filter, kernel, str
             return
         print("Running on target: %s" % device)
 
-        conv2d_nchw, schedule_conv2d_nchw = topi.testing.get_conv2d_nchw_implement(device)
+        conv2d_nchw, schedule_conv2d_nchw = tvm.topi.testing.get_conv2d_nchw_implement(device)
 
         k = 10.0
         dilation = (1, 1)
@@ -215,7 +215,7 @@ def verify_conv2d_scalar_bop(batch, in_size, in_channel, num_filter, kernel, str
 
         a_npy = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
         w_npy = np.random.uniform(size=get_const_tuple(W.shape)).astype(W.dtype)
-        b_npy = topi.testing.conv2d_nchw_python(a_npy, w_npy, stride, padding)
+        b_npy = tvm.topi.testing.conv2d_nchw_python(a_npy, w_npy, stride, padding)
         c_npy = np.random.uniform(size=get_const_tuple(B.shape)).astype(B.dtype)
         if typ == "add":
             c_npy = b_npy + k
index 86a71da..c3a6661 100644 (file)
@@ -18,7 +18,7 @@
 
 import tvm
 from tvm import te
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 
 def test_layout():
     layout = tvm.tir.layout("NCHW16c")
index 26bf80f..0920603 100644 (file)
@@ -16,7 +16,7 @@
 # under the License.
 import tvm
 from tvm import te
-import topi
+from tvm import topi
 from tvm.contrib import util, clang
 import numpy as np
 import ctypes
@@ -84,7 +84,7 @@ def test_unary_intrin():
         f(a, b)
         tvm.testing.assert_allclose(
             b.asnumpy(), np_func(a.asnumpy()), atol=1e-5, rtol=1e-5)
-    
+
     for func in test_funcs:
         run_test(*func)
 
index 599ddba..1d57db6 100644 (file)
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 import tvm
-import topi
+from tvm import topi
 from tvm import te
 
 
index ce8c16e..73642e0 100644 (file)
@@ -430,7 +430,7 @@ def test_conv_tiling():
 
 
 def test_multilevel_splitting_with_indivisble_factors():
-    import topi
+    from tvm import topi
     A = te.placeholder((130,), dtype="float32")
     B = topi.nn.relu(A)
     s = te.create_schedule(B.op)
index 414186c..61c079a 100755 (executable)
@@ -27,4 +27,4 @@ fi
 set -u
 
 export TVM_PATH=`pwd`
-export PYTHONPATH=${TVM_PATH}/python:${TVM_PATH}/topi/python
+export PYTHONPATH=${TVM_PATH}/python
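
Note: with topi folded into the tvm package, the scripts above no longer need a
second PYTHONPATH entry. A quick import check, hedged, run from a tree where
tvm has been built:

    import tvm
    from tvm import topi
    assert topi.__name__ == "tvm.topi"   # same module, new home
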
index 0ff6c39..7a93b47 100755 (executable)
@@ -22,7 +22,7 @@ set -u
 export LD_LIBRARY_PATH="lib:${LD_LIBRARY_PATH:-}"
 
 tvm_root="$(git rev-parse --show-toplevel)"
-export PYTHONPATH="$tvm_root/python":"$tvm_root/topi/python"
+export PYTHONPATH="$tvm_root/python"
 
 # to avoid CI CPU thread throttling.
 export TVM_BIND_THREADS=0
index e483d5f..3bc3caf 100755 (executable)
@@ -31,4 +31,4 @@ make cython3
 # cleanup pycache
 find . -type f -path "*.pyc" | xargs rm -f
 
-python3 -m pytest topi/tests/python
+python3 -m pytest tests/python/topi/
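
Note: the test sources moved from topi/tests/python to tests/python/topi, so CI
now points pytest at the new tree. A programmatic equivalent of the command
above, as a hedged sketch:

    import pytest
    raise SystemExit(pytest.main(["tests/python/topi/"]))
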
index 6d159f6..d7b9a5b 100755 (executable)
@@ -22,7 +22,7 @@ set -u
 export TVM_HOME="$(git rev-parse --show-toplevel)"
 
 export LD_LIBRARY_PATH="$TVM_HOME/lib:$TVM_HOME/build:${LD_LIBRARY_PATH:-}"
-export PYTHONPATH="$TVM_HOME/python":"$TVM_HOME/topi/python"
+export PYTHONPATH="$TVM_HOME/python"
 export RUST_DIR="$TVM_HOME/rust"
 
 
diff --git a/topi/python/setup.py b/topi/python/setup.py
deleted file mode 100644 (file)
index 6837179..0000000
+++ /dev/null
@@ -1,127 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# pylint: disable=invalid-name, exec-used
-"""Setup TOPI package."""
-from __future__ import absolute_import
-import os
-import shutil
-import sys
-
-from setuptools import find_packages
-from setuptools.dist import Distribution
-
-if "--inplace" in sys.argv:
-    from distutils.core import setup
-    from distutils.extension import Extension
-else:
-    from setuptools import setup
-    from setuptools.extension import Extension
-
-CURRENT_DIR = os.path.dirname(__file__)
-
-
-def get_lib_names():
-    if sys.platform.startswith('win32'):
-        return ['libtvm_topi.dll', 'tvm_topi.dll']
-    if sys.platform.startswith('darwin'):
-        return ['libtvm_topi.dylib', 'tvm_topi.dylib']
-    return ['libtvm_topi.so', 'tvm_topi.so']
-
-
-def get_lib_path():
-    """Get library path, name and version"""
-    # We can not import `libinfo.py` in setup.py directly since __init__.py
-    # Will be invoked which introduces dependences
-    libinfo_py = os.path.join(CURRENT_DIR, '../../python/tvm/_ffi/libinfo.py')
-    libinfo = {'__file__': libinfo_py}
-    exec(compile(open(libinfo_py, "rb").read(),
-                 libinfo_py, 'exec'), libinfo, libinfo)
-    version = libinfo['__version__']
-    if not os.getenv('CONDA_BUILD'):
-        lib_path = libinfo['find_lib_path'](get_lib_names())
-        libs = [lib_path[0]]
-        if libs[0].find("runtime") == -1:
-            for name in lib_path[1:]:
-                if name.find("runtime") != -1:
-                    libs.append(name)
-                    break
-    else:
-        libs = None
-    return libs, version
-
-
-LIB_LIST, __version__ = get_lib_path()
-
-if not os.getenv('CONDA_BUILD'):
-    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
-    for i, path in enumerate(LIB_LIST):
-        LIB_LIST[i] = os.path.relpath(path, curr_path)
-    setup_kwargs = {
-        "include_package_data": True,
-        "data_files": [('topi', LIB_LIST)]
-    }
-else:
-    setup_kwargs = {}
-
-
-include_libs = False
-wheel_include_libs = False
-if not os.getenv('CONDA_BUILD'):
-    if "bdist_wheel" in sys.argv:
-        wheel_include_libs = True
-    else:
-        include_libs = True
-
-# For bdist_wheel only
-if wheel_include_libs:
-    with open("MANIFEST.in", "w") as fo:
-        for path in LIB_LIST:
-            shutil.copy(path, os.path.join(CURRENT_DIR, 'topi'))
-            _, libname = os.path.split(path)
-            fo.write("include topi/%s\n" % libname)
-    setup_kwargs = {
-        "include_package_data": True
-    }
-
-if include_libs:
-    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
-    for i, path in enumerate(LIB_LIST):
-        LIB_LIST[i] = os.path.relpath(path, curr_path)
-    setup_kwargs = {
-        "include_package_data": True,
-        "data_files": [('topi', LIB_LIST)]
-    }
-
-setup(name='topi',
-      version=__version__,
-      description="TOPI: TVM operator index",
-      install_requires=[
-          "numpy",
-          "decorator",
-      ],
-      packages=find_packages(),
-      url='https://github.com/apache/incubator-tvm',
-      **setup_kwargs)
-
-
-if wheel_include_libs:
-    # Wheel cleanup
-    os.remove("MANIFEST.in")
-    for path in LIB_LIST:
-        _, libname = os.path.split(path)
-        os.remove("topi/%s" % libname)
diff --git a/topi/python/topi/cpp/impl.py b/topi/python/topi/cpp/impl.py
deleted file mode 100644 (file)
index 1081baa..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-"""Load Lib for C++ TOPI ops and schedules"""
-import sys
-import os
-import ctypes
-import tvm._ffi
-
-from tvm._ffi import libinfo
-
-def _get_lib_names():
-    if sys.platform.startswith('win32'):
-        return ['libtvm_topi.dll', 'tvm_topi.dll']
-    if sys.platform.startswith('darwin'):
-        return ['libtvm_topi.dylib', 'tvm_topi.dylib']
-    return ['libtvm_topi.so', 'tvm_topi.so']
-
-def _load_lib():
-    """Load libary by searching possible path."""
-    curr_path = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
-    lib_search = [curr_path, os.path.dirname(curr_path)]
-    lib_path = libinfo.find_lib_path(_get_lib_names(), lib_search, optional=True)
-    if lib_path is None:
-        return None, None
-    lib = ctypes.CDLL(lib_path[0], ctypes.RTLD_GLOBAL)
-    return lib, os.path.basename(lib_path[0])
-
-_LIB, _LIB_NAME = _load_lib()
-
-tvm._ffi._init_api("topi", "topi.cpp")
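
Note: this file located a separate libtvm_topi shared library and registered
the C++ TOPI ops from it. After the merge those symbols are built into libtvm
itself, so the FFI registration happens inside the tvm.topi package. A sketch
of the replacement call; the target module string below is an assumption, not
quoted from this PR:

    # assumed new home: python/tvm/topi/cpp/impl.py
    import tvm._ffi
    tvm._ffi._init_api("topi", "tvm.topi.cpp")
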
index 3cdbb84..9043151 100644 (file)
@@ -50,8 +50,8 @@ import numpy as np
 
 import tvm
 from tvm import te
-import topi
-from topi.testing import conv2d_nchw_python
+from tvm import topi
+from tvm.topi.testing import conv2d_nchw_python
 
 from tvm import autotvm
 
index 55eb3aa..2e3e389 100644 (file)
@@ -79,7 +79,7 @@ from tvm.contrib.download import download_testdata
 #
 # .. code-block:: bash
 #
-#   echo 'export PYTHONPATH=/workspace/python:/workspace/topi/python:/workspace/vta/python:${PYTHONPATH}' >> ~/.bashrc
+#   echo 'export PYTHONPATH=/workspace/python:/workspace/vta/python:${PYTHONPATH}' >> ~/.bashrc
 #   source ~/.bashrc
 
 #################################################################
index a6cd801..7edcde9 100644 (file)
@@ -39,7 +39,7 @@ how to use TEDD and how to interpret the rendered graphs.
 """
 import tvm
 from tvm import te
-import topi
+from tvm import topi
 from tvm.contrib import tedd
 
 ######################################################################
index 8a77c77..ac5b50f 100644 (file)
@@ -180,7 +180,7 @@ print(tvm.lower(s, [A, B, C], simple_mode=True))
 #
 func = tvm.build(s, [A, B, C], target="llvm", name="gemv")
 
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 dtype = A.dtype
 ctx = tvm.context("cpu", 0)
 a = np.random.uniform(size=get_const_tuple(A.shape)).astype(dtype)
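
Note: get_const_tuple, now imported from tvm.topi.util, converts a shape made
of tvm integer expressions into a plain Python tuple that numpy can consume
directly. A small illustration; the placeholder is hypothetical:

    from tvm import te
    from tvm.topi.util import get_const_tuple

    A = te.placeholder((16, 32), name="A")
    assert get_const_tuple(A.shape) == (16, 32)
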
index 5bb5f0a..5938b69 100644 (file)
@@ -27,7 +27,7 @@ from __future__ import absolute_import, print_function
 
 import tvm
 from tvm import te
-import topi
+from tvm import topi
 import numpy as np
 
 ######################################################################
index 70c003c..d39f982 100644 (file)
@@ -30,7 +30,7 @@ from .rpc_client import reconfig_runtime, program_fpga
 
 __version__ = "0.1.0"
 
-# do not import topi when running vta.exec.rpc_server
+# do not import tvm.topi when running vta.exec.rpc_server
 # to maintain minimum dependency on the board
 if sys.argv[0] not in ("-c", "-m"):
     from . import top
index 7a07100..48a5c1c 100644 (file)
@@ -21,7 +21,7 @@ from __future__ import absolute_import as _abs
 
 import tvm
 from tvm import te
-from topi import util
+from tvm.topi import util
 
 from tvm.relay.op.op import register_compute, register_injective_schedule
 from tvm.relay.op.op import register_pattern, OpPattern
index 2198ed4..8280798 100644 (file)
@@ -20,7 +20,7 @@ from __future__ import absolute_import as _abs
 
 import tvm
 from tvm import te
-import topi
+from tvm import topi
 
 from tvm.relay.op import op as reg
 from tvm.relay.op import strategy as _strategy
index 5b23dde..799b105 100644 (file)
@@ -21,7 +21,7 @@ import numpy as np
 import tvm
 from tvm import te
 from tvm import autotvm
-import topi
+from tvm import topi
 
 from .util import is_packed_layout
 from ..environment import get_env
index ddfebc2..ea0dfce 100644 (file)
@@ -21,9 +21,9 @@ import numpy as np
 import tvm
 from tvm import te
 from tvm import autotvm
-import topi
-from topi.util import get_const_tuple
-from topi.nn.util import get_pad_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
 
 from ..environment import get_env
 
index 912f41f..0b98261 100644 (file)
@@ -21,7 +21,7 @@ import numpy as np
 import tvm
 from tvm import te
 from tvm import autotvm
-import topi
+from tvm import topi
 
 from ..environment import get_env
 
index d470fb7..36768c3 100644 (file)
@@ -21,7 +21,7 @@ import numpy as np
 import tvm
 from tvm import te
 from tvm import autotvm
-import topi
+from tvm import topi
 
 from ..environment import get_env
 
index d9f47f1..eb051f5 100644 (file)
@@ -18,7 +18,7 @@
 # pylint: disable=len-as-condition, no-else-return, unused-argument, invalid-name
 import tvm
 from tvm import te
-from topi import util
+from tvm.topi import util
 
 from .environment import get_env
 
index 6d0b5d4..6095d96 100644 (file)
@@ -24,7 +24,7 @@ import os
 import tvm
 from tvm import te
 from tvm import autotvm
-import topi
+from tvm import topi
 import vta
 import vta.testing
 
index b7c380e..551e6f9 100644 (file)
@@ -24,7 +24,7 @@ import os
 import tvm
 from tvm import te
 from tvm import autotvm
-import topi
+from tvm import topi
 import vta
 import vta.testing
 
index e54de1d..b1711fa 100644 (file)
@@ -24,7 +24,7 @@ import os
 import tvm
 from tvm import te
 from tvm import autotvm
-import topi
+from tvm import topi
 import vta
 import vta.testing
 
index 72f9525..d8dcc02 100644 (file)
@@ -24,7 +24,7 @@ import os
 import tvm
 from tvm import te
 from tvm import autotvm
-import topi
+from tvm import topi
 import vta
 import vta.testing
 
index 2d358d3..2d15335 100644 (file)
@@ -22,7 +22,7 @@ from mxnet.gluon.model_zoo import vision
 import numpy as np
 from PIL import Image
 
-import topi
+from tvm import topi
 import tvm
 from tvm import te
 from tvm import rpc, autotvm, relay
index b3c36e8..3affbac 100644 (file)
@@ -30,8 +30,8 @@ from tvm import relay
 from tvm import autotvm
 from tvm.contrib import util
 from tvm.contrib.pickle_memoize import memoize
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 import vta
 from vta import program_fpga, reconfig_runtime
 import vta.testing
@@ -143,7 +143,7 @@ def run_conv2d(env, remote, wl, target,
         a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)
         w_np = np.random.randint(w_min, w_max, size=w_shape).astype(kernel.dtype)
         b_np = np.random.randint(b_min, b_max, size=b_shape).astype(env.acc_dtype)
-        r_np = topi.testing.conv2d_nchw_python(
+        r_np = tvm.topi.testing.conv2d_nchw_python(
             a_np.astype(env.acc_dtype), w_np.astype(env.acc_dtype), (wl.hstride, wl.wstride), wl.hpad).astype(env.acc_dtype)
         return a_np, w_np, b_np, r_np
 
index 558c3ab..80a6848 100644 (file)
@@ -30,8 +30,8 @@ from tvm import relay
 from tvm import autotvm
 from tvm.contrib import util
 from tvm.contrib.pickle_memoize import memoize
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 import vta
 from vta import program_fpga, reconfig_runtime
 import vta.testing
@@ -134,7 +134,7 @@ def run_conv2d_transpose(env, remote, wl, target,
         w_min, w_max = 0 - (1 << (env.WGT_WIDTH - 1)), (1 << (env.WGT_WIDTH - 1))
         a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)
         w_np = np.random.randint(w_min, w_max, size=(wl.in_filter, wl.out_filter, wl.hkernel, wl.wkernel)).astype(kernel.dtype)
-        r_np = topi.testing.conv2d_transpose_nchw_python(
+        r_np = tvm.topi.testing.conv2d_transpose_nchw_python(
             a_np.astype(env.acc_dtype), w_np.astype(env.acc_dtype), (wl.hstride, wl.wstride), wl.hpad, (wl.o_hpad, wl.o_wpad)).astype(env.acc_dtype)
         return a_np, w_np, r_np
 
index 95c491a..3affb36 100644 (file)
@@ -28,8 +28,8 @@ from tvm import te
 from tvm import autotvm
 from tvm.contrib import util
 from tvm.contrib.pickle_memoize import memoize
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 import vta
 from vta import program_fpga, reconfig_runtime
 import vta.testing
index 1d5838c..1fed5a0 100644 (file)
@@ -29,8 +29,8 @@ from tvm import te
 from tvm import relay
 from tvm import autotvm
 from tvm.contrib import util
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 import vta
 from vta import program_fpga, reconfig_runtime
 import vta.testing
@@ -135,7 +135,7 @@ def run_group_conv2d(env, remote, wl, target,
         a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)
         w_np = np.random.randint(w_min, w_max, size=w_shape).astype(kernel.dtype)
         b_np = np.random.randint(b_min, b_max, size=b_shape).astype(env.acc_dtype)
-        r_np = topi.testing.conv2d_nchw_python(
+        r_np = tvm.topi.testing.conv2d_nchw_python(
             a_np.astype(env.acc_dtype), w_np.astype(env.acc_dtype),
             (wl.hstride, wl.wstride), wl.hpad, wl.groups).astype(env.acc_dtype)
         return a_np, w_np, b_np, r_np
index c76636a..be347a0 100644 (file)
@@ -18,7 +18,7 @@
 import tvm
 from tvm import te
 import numpy as np
-import topi
+from tvm import topi
 from tvm.contrib import util
 
 import vta
index a92b1ee..1184006 100644 (file)
@@ -58,7 +58,7 @@ from mxnet.gluon.model_zoo import vision
 import numpy as np
 from PIL import Image
 
-import topi
+from tvm import topi
 import tvm
 from tvm import te
 from tvm import rpc, autotvm, relay
index 0564a6a..d364fef 100644 (file)
@@ -123,7 +123,7 @@ elif env.TARGET in ["sim", "tsim"]:
 #        :align: center
 #        :width: 480px
 
-import topi
+from tvm import topi
 
 # 2D convolution layer dimensions taken from ResNet-18 architecture
 # (9th convolutional layer)
@@ -371,7 +371,7 @@ print(vta.lower(s, [data, kernel, res], simple_mode=True))
 # ensure correctness.
 
 # This library facilitates 2D convolution testing
-from topi.testing import conv2d_nchw_python
+from tvm.topi.testing import conv2d_nchw_python
 
 # Compile the TVM module
 my_conv = vta.build(s, [data, kernel, res], "ext_dev", env.target_host, name="my_conv")
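
Note: conv2d_nchw_python is the plain-numpy reference the tutorial compares
VTA results against. A hedged usage sketch with illustrative shapes (NCHW data,
OIHW kernel, stride 1, padding 1):

    import numpy as np
    from tvm.topi.testing import conv2d_nchw_python

    a = np.random.uniform(size=(1, 16, 14, 14)).astype("float32")
    w = np.random.uniform(size=(32, 16, 3, 3)).astype("float32")
    ref = conv2d_nchw_python(a, w, (1, 1), 1)   # stride, padding
    print(ref.shape)   # (1, 32, 14, 14)
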