This PR migrates the TOPI library into a sub-namespace of tvm.
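For downstream users, the visible change is the import path; C++ includes and header guards move analogously, as the hunks below show. A minimal before/after sketch:

```python
# Before this PR, TOPI shipped as a standalone package:
#   import topi
#   from topi.util import get_const_tuple

# After this PR, TOPI lives under the tvm namespace:
from tvm import topi
from tvm.topi.util import get_const_tuple
```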
file(GLOB TOPI_SRCS
- topi/src/*.cc
+ src/topi/*.cc
)
file(GLOB RUNTIME_SRCS
set(CMAKE_CUDA_STANDARD 14)
endif()
-add_library(tvm SHARED ${COMPILER_SRCS} ${RUNTIME_SRCS})
+add_library(tvm SHARED ${COMPILER_SRCS} ${RUNTIME_SRCS} ${TOPI_SRCS})
-add_library(tvm_topi SHARED ${TOPI_SRCS})
add_library(tvm_runtime SHARED ${RUNTIME_SRCS})
target_link_libraries(tvm_runtime ${HIDE_SYMBOLS_LINKER_FLAGS})
endif()
-# Related headers
-target_include_directories(
- tvm
- PUBLIC "topi/include")
-target_include_directories(
- tvm_topi
- PUBLIC "topi/include")
-
-
# Tests
set(TEST_EXECS "")
file(GLOB TEST_SRCS tests/cpp/*.cc)
PATTERN "*.h"
)
install(
- DIRECTORY "topi/include/." DESTINATION "include"
- FILES_MATCHING
- PATTERN "*.h"
- )
- install(
DIRECTORY "3rdparty/dlpack/include/." DESTINATION "include"
FILES_MATCHING
PATTERN "*.h"
# Lint scripts
cpplint:
python3 3rdparty/dmlc-core/scripts/lint.py vta cpp vta/include vta/src
- python3 3rdparty/dmlc-core/scripts/lint.py topi cpp topi/include;
python3 3rdparty/dmlc-core/scripts/lint.py tvm cpp \
include src \
examples/extension/src examples/graph_executor/src
pylint:
python3 -m pylint python/tvm --rcfile=$(ROOTDIR)/tests/lint/pylintrc
- python3 -m pylint topi/python/topi --rcfile=$(ROOTDIR)/tests/lint/pylintrc
python3 -m pylint vta/python/vta --rcfile=$(ROOTDIR)/tests/lint/pylintrc
jnilint:
LOCAL_C_INCLUDES := $(ROOT_PATH)/include \
$(ROOT_PATH)/3rdparty/dlpack/include \
-                    $(ROOT_PATH)/3rdparty/dmlc-core/include \
-                    $(ROOT_PATH)/3rdparty/HalideIR/src \
-                    $(ROOT_PATH)/topi/include
+                    $(ROOT_PATH)/3rdparty/dmlc-core/include
LOCAL_MODULE = tvm4j_runtime_packed
LOCAL_C_INCLUDES := $(ROOT_PATH)/include \
$(ROOT_PATH)/3rdparty/dlpack/include \
- $(ROOT_PATH)/3rdparty/dmlc-core/include \
- $(ROOT_PATH)/topi/include
+ $(ROOT_PATH)/3rdparty/dmlc-core/include
LOCAL_MODULE = tvm4j_runtime_packed
LOCAL_C_INCLUDES := $(ROOT_PATH)/include \
$(ROOT_PATH)/3rdparty/dlpack/include \
- $(ROOT_PATH)/3rdparty/dmlc-core/include \
- $(ROOT_PATH)/topi/include
+ $(ROOT_PATH)/3rdparty/dmlc-core/include
LOCAL_MODULE = tvm4j_runtime_packed
concat!(
mf_dir!("/../../python"),
":",
- mf_dir!("/../../nnvm/python"),
- ":",
- mf_dir!("/../../topi/python")
+ mf_dir!("/../../nnvm/python")
),
)
.output()
<!--- specific language governing permissions and limitations -->
<!--- under the License. -->
-# TOPI: TVM Operator Inventory
+# TOPI Recipe: TVM Operator Optimization Recipes
TOPI is the operator collection library for TVM, intended to share the effort of crafting
and optimizing TVM-generated kernels. The goals:
- Give common primitives for fused op creation.
- Provide commonly used schedules for each architecture.
-## Organization
-- [include](include) C++ library, header only
-- [python](python) python library
-- [recipe](recipe) Recipe collections containing useful operator examples.
-
## Guidelines
- Use numpy-style naming conventions for known ops.
- Separate operator declaration from schedule when possible (see the sketch below).
- Be data layout aware: if the layout is not specified in an argument or the function, assume NCHW by default.
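A minimal sketch of the declaration/schedule split, using only `te`/`topi` APIs that appear elsewhere in this PR (the shape is illustrative):

```python
import tvm
from tvm import te, topi

# Declaration: describe the computation only (NCHW layout, per the guideline above).
A = te.placeholder((1, 64, 56, 56), name="A")
B = topi.nn.relu(A)

# Schedule: decide how to execute it, kept separate from the declaration.
s = te.create_schedule(B.op)
f = tvm.build(s, [A, B], "llvm")
```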
-## Testcase
-- Add testcases to testout the schedule and dataflow in the TOPI workflow
-- Only do correctness testing without attaching compiler flags and only run it once.
-
## Performance Tuning Workflow
Since TVM is a work in progress, some optimizations might not be perfect.
One quick way I find useful is codegen plus manual modification, sketched below.
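The recipes below implement this with TVM's `tvm_callback_cuda_postproc` hook: dump the generated CUDA source to disk, hand-edit it, then feed the edited kernel back in. A condensed sketch of the pattern (the `perf/` file names follow the recipes' convention):

```python
import os
import tvm

TASK = "reduce_map"      # matches the recipe below
USE_MANUAL_CODE = False  # flip to True after editing the dumped kernel

@tvm.register_func("tvm_callback_cuda_postproc")
def tvm_callback_cuda_postproc(code):
    # Dump the generated CUDA source so it can be inspected and hand-tuned.
    os.makedirs("perf", exist_ok=True)
    with open("perf/%s_generated.cu" % TASK, "w") as f:
        f.write(code)
    if USE_MANUAL_CODE:
        # Swap in the hand-modified kernel before compilation.
        code = open("perf/%s_manual.cu" % TASK).read()
    return code
```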
from tvm.contrib import nvcc
import numpy as np
-import topi
+from tvm import topi
TASK = "reduce_map"
from scipy import signal
from tvm.contrib import nvcc
-import topi
-from topi.util import get_const_tuple
-from topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_nchw, schedule_depthwise_conv2d_nhwc
+from tvm import topi
+from tvm.topi.util import get_const_tuple
+from tvm.topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_nchw, schedule_depthwise_conv2d_nhwc
TASK = "depthwise_conv2d"
USE_MANUAL_CODE = False
print("average time cost of 1000 runs (depthwise_conv2d + scale_shift) = %g us" % (tcost_2*1e6))
print("average time cost of 1000 runs (depthwise_conv2d + scale_shift + relu) = %g us" % (tcost_3*1e6))
# correctness
- depthwise_conv2d_scipy = topi.testing.depthwise_conv2d_python_nchw(input_np, filter_np, stride=[stride_h, stride_w], padding=padding)
+ depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nchw(input_np, filter_np, stride=[stride_h, stride_w], padding=padding)
scale_shift_scipy = np.zeros(shape=get_const_tuple(ScaleShift.shape))
for c in range(in_channel * channel_multiplier):
scale_shift_scipy[:,c,:,:] = depthwise_conv2d_scipy[:,c,:,:] * scale_np[c] + shift_np[c]
print("average time cost of 1000 runs (depthwise_conv2d + scale_shift) = %g us" % (tcost_2*1e6))
print("average time cost of 1000 runs (depthwise_conv2d + scale_shift + relu) = %g us" % (tcost_3*1e6))
# correctness
- depthwise_conv2d_scipy = topi.testing.depthwise_conv2d_python_nhwc(input_np, filter_np, stride=[stride_h, stride_w], padding=padding)
+ depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nhwc(input_np, filter_np, stride=[stride_h, stride_w], padding=padding)
scale_shift_scipy = np.zeros(shape=get_const_tuple(ScaleShift.shape))
for c in range(in_channel * channel_multiplier):
scale_shift_scipy[:,:,:,c] = depthwise_conv2d_scipy[:,:,:,c] * scale_np[c] + shift_np[c]
import tvm
from tvm import te
from tvm.contrib import nvcc
-import topi
-from topi.util import get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
TASK = "conv2d_hwcn_map"
USE_MANUAL_CODE = False
a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
w_np = np.random.uniform(size=get_const_tuple(W.shape)).astype(W.dtype)
- b_np = topi.testing.conv2d_hwcn_python(a_np, w_np, stride, padding)
+ b_np = tvm.topi.testing.conv2d_hwcn_python(a_np, w_np, stride, padding)
c_np = np.maximum(b_np, 0)
def check_device(device):
import numpy as np
import tvm
from tvm import te
-import topi
+from tvm import topi
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOGGER = logging.getLogger('test_conv_int8_intel')
import numpy as np
import tvm
from tvm import te
-import topi
+from tvm import topi
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOGGER = logging.getLogger('test_conv_int8_intel')
import tvm
from tvm import te
from tvm import autotvm
-from topi.cuda.tensor_intrin import dp4a
+from tvm.topi.cuda.tensor_intrin import dp4a
DO_TUNING = True
PRETUNED_INDEX = 75333
from tvm.contrib import nvcc
import numpy as np
-import topi
+from tvm import topi
TASK = "reduce_map"
cd python
$PYTHON setup.py install --single-version-externally-managed --record=/tmp/record.txt
cd ..
-
-cd topi/python
-$PYTHON setup.py install --single-version-externally-managed --record=/tmp/record.txt
-cd ../..
make -j10
# Environment variables
-ENV PYTHONPATH=/usr/tvm/python:/usr/tvm/topi/python:/usr/tvm/vta/python:${PYTHONPATH}
+ENV PYTHONPATH=/usr/tvm/python:/usr/tvm/vta/python:${PYTHONPATH}
ENV ANDROID_HOME=/opt/android-sdk-linux/
RUN bash /install/install_tvm_cpu.sh
# Environment variables
-ENV PYTHONPATH=/usr/tvm/python:/usr/tvm/topi/python:/usr/tvm/vta/python:${PYTHONPATH}
+ENV PYTHONPATH=/usr/tvm/python:/usr/tvm/vta/python:${PYTHONPATH}
RUN bash /install/install_tvm_gpu.sh
# Environment variables
-ENV PYTHONPATH=/usr/tvm/python:/usr/tvm/topi/python:/usr/tvm/vta/python:${PYTHONPATH}
+ENV PYTHONPATH=/usr/tvm/python:/usr/tvm/vta/python:${PYTHONPATH}
ENV PATH=/usr/local/nvidia/bin:${PATH}
ENV PATH=/usr/local/cuda/bin:${PATH}
ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/nvidia/lib64:${LD_LIBRARY_PATH}
make -j6
RUN echo "Building Python package"
-ENV PYTHONPATH=${TVM_HOME}/python:${TVM_HOME}/topi/python:${PYTHONPATH}
+ENV PYTHONPATH=${TVM_HOME}/python:${PYTHONPATH}
RUN cd ${TVM_HOME}/python && python3 setup.py install --user
-RUN cd ${TVM_HOME}/topi/python && python3 setup.py install --user
fi
if [[ "${DOCKER_IMAGE_NAME}" == *"ci"* ]]; then
- CI_PY_ENV="-e PYTHONPATH=/workspace/python:/workspace/topi/python"
+ CI_PY_ENV="-e PYTHONPATH=/workspace/python"
else
CI_PY_ENV=""
fi
# spaces.
# Note: If this tag is empty the current directory is searched.
-INPUT = include/tvm topi/include/topi
+INPUT = include/tvm
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
micro
contrib
graph_runtime
- vta/index
topi
+ vta/index
specific language governing permissions and limitations
under the License.
-topi
-----
-.. automodule:: topi
-
-List of operators
-~~~~~~~~~~~~~~~~~
-
-.. autosummary::
-
- topi.identity
- topi.negative
- topi.floor
- topi.ceil
- topi.sign
- topi.trunc
- topi.round
- topi.abs
- topi.isnan
- topi.isfinite
- topi.isinf
- topi.exp
- topi.tanh
- topi.log
- topi.sqrt
- topi.rsqrt
- topi.sigmoid
- topi.clip
- topi.cast
- topi.reinterpret
- topi.transpose
- topi.flip
- topi.reverse_sequence
- topi.strided_slice
- topi.expand_dims
- topi.reshape
- topi.unravel_index
- topi.sparse_to_dense
- topi.squeeze
- topi.concatenate
- topi.split
- topi.take
- topi.gather
- topi.gather_nd
- topi.full
- topi.full_like
- topi.nn.relu
- topi.nn.leaky_relu
- topi.nn.dilate
- topi.nn.pool
- topi.nn.global_pool
- topi.nn.adaptive_pool
- topi.nn.upsampling
- topi.nn.softmax
- topi.nn.dense
- topi.nn.batch_matmul
- topi.nn.log_softmax
- topi.nn.conv2d_nchw
- topi.nn.conv2d_hwcn
- topi.nn.depthwise_conv2d_nchw
- topi.nn.depthwise_conv2d_nhwc
- topi.nn.fifo_buffer
- topi.max
- topi.sum
- topi.min
- topi.argmax
- topi.argmin
- topi.prod
- topi.broadcast_to
- topi.add
- topi.subtract
- topi.multiply
- topi.divide
- topi.mod
- topi.maximum
- topi.minimum
- topi.power
- topi.greater
- topi.less
- topi.equal
- topi.not_equal
- topi.greater_equal
- topi.less_equal
- topi.all
- topi.any
- topi.logical_and
- topi.logical_or
- topi.logical_not
- topi.logical_xor
- topi.arange
- topi.meshgrid
- topi.stack
- topi.repeat
- topi.tile
- topi.shape
- topi.ndarray_size
- topi.layout_transform
- topi.image.resize
- topi.image.crop_and_resize
- topi.image.dilation2d
- topi.argsort
- topi.topk
- topi.sequence_mask
- topi.one_hot
-
-
-List of schedules
-~~~~~~~~~~~~~~~~~
-.. autosummary::
-
- topi.generic.schedule_conv2d_nchw
- topi.generic.schedule_depthwise_conv2d_nchw
- topi.generic.schedule_reduce
- topi.generic.schedule_broadcast
- topi.generic.schedule_injective
+tvm.topi
+--------
+.. automodule:: tvm.topi
+ :members:
+ :imported-members:
+ :autosummary:
+
+tvm.topi.nn
+~~~~~~~~~~~
-topi
-~~~~
-.. autofunction:: topi.negative
-.. autofunction:: topi.identity
-.. autofunction:: topi.floor
-.. autofunction:: topi.ceil
-.. autofunction:: topi.sign
-.. autofunction:: topi.trunc
-.. autofunction:: topi.round
-.. autofunction:: topi.abs
-.. autofunction:: topi.isnan
-.. autofunction:: topi.isfinite
-.. autofunction:: topi.isinf
-.. autofunction:: topi.exp
-.. autofunction:: topi.tanh
-.. autofunction:: topi.log
-.. autofunction:: topi.sqrt
-.. autofunction:: topi.rsqrt
-.. autofunction:: topi.sigmoid
-.. autofunction:: topi.clip
-.. autofunction:: topi.cast
-.. autofunction:: topi.reinterpret
-.. autofunction:: topi.transpose
-.. autofunction:: topi.flip
-.. autofunction:: topi.reverse_sequence
-.. autofunction:: topi.strided_slice
-.. autofunction:: topi.expand_dims
-.. autofunction:: topi.reshape
-.. autofunction:: topi.unravel_index
-.. autofunction:: topi.sparse_to_dense
-.. autofunction:: topi.squeeze
-.. autofunction:: topi.concatenate
-.. autofunction:: topi.split
-.. autofunction:: topi.take
-.. autofunction:: topi.gather
-.. autofunction:: topi.gather_nd
-.. autofunction:: topi.full
-.. autofunction:: topi.full_like
-.. autofunction:: topi.all
-.. autofunction:: topi.any
-.. autofunction:: topi.max
-.. autofunction:: topi.sum
-.. autofunction:: topi.min
-.. autofunction:: topi.prod
-.. autofunction:: topi.broadcast_to
-.. autofunction:: topi.add
-.. autofunction:: topi.subtract
-.. autofunction:: topi.multiply
-.. autofunction:: topi.divide
-.. autofunction:: topi.floor_divide
-.. autofunction:: topi.mod
-.. autofunction:: topi.floor_mod
-.. autofunction:: topi.maximum
-.. autofunction:: topi.minimum
-.. autofunction:: topi.power
-.. autofunction:: topi.greater
-.. autofunction:: topi.less
-.. autofunction:: topi.arange
-.. autofunction:: topi.meshgrid
-.. autofunction:: topi.stack
-.. autofunction:: topi.repeat
-.. autofunction:: topi.tile
-.. autofunction:: topi.shape
-.. autofunction:: topi.ndarray_size
-.. autofunction:: topi.layout_transform
-.. autofunction:: topi.argsort
-.. autofunction:: topi.topk
-.. autofunction:: topi.sequence_mask
-.. autofunction:: topi.one_hot
-.. autofunction:: topi.logical_and
-.. autofunction:: topi.logical_or
-.. autofunction:: topi.logical_not
-.. autofunction:: topi.logical_xor
+.. automodule:: tvm.topi.nn
+ :members:
+ :imported-members:
+ :autosummary:
-topi.nn
-~~~~~~~
-.. autofunction:: topi.nn.relu
-.. autofunction:: topi.nn.leaky_relu
-.. autofunction:: topi.nn.dilate
-.. autofunction:: topi.nn.pool
-.. autofunction:: topi.nn.global_pool
-.. autofunction:: topi.nn.upsampling
-.. autofunction:: topi.nn.softmax
-.. autofunction:: topi.nn.dense
-.. autofunction:: topi.nn.batch_matmul
-.. autofunction:: topi.nn.log_softmax
-.. autofunction:: topi.nn.conv2d_nchw
-.. autofunction:: topi.nn.conv2d_hwcn
-.. autofunction:: topi.nn.depthwise_conv2d_nchw
-.. autofunction:: topi.nn.depthwise_conv2d_nhwc
-.. autofunction:: topi.nn.conv3d_ncdhw
-.. autofunction:: topi.nn.conv3d_transpose_ncdhw
-.. autofunction:: topi.nn.fifo_buffer
+tvm.topi.image
+~~~~~~~~~~~~~~
+.. automodule:: tvm.topi.image
+ :members:
+ :imported-members:
+ :autosummary:
-topi.image
-~~~~~~~~~~
-.. autofunction:: topi.image.resize
-.. autofunction:: topi.image.crop_and_resize
-topi.sparse
-~~~~~~~~~~~
-.. autofunction:: topi.sparse.csrmv
-.. autofunction:: topi.sparse.csrmm
-.. autofunction:: topi.sparse.dense
+tvm.topi.sparse
+~~~~~~~~~~~~~~~
+.. automodule:: tvm.topi.sparse
+ :members:
+ :imported-members:
+ :autosummary:
-topi.generic
-~~~~~~~~~~~~
-.. automodule:: topi.generic
-.. autofunction:: topi.generic.schedule_conv2d_nchw
-.. autofunction:: topi.generic.schedule_depthwise_conv2d_nchw
-.. autofunction:: topi.generic.schedule_conv3d_ncdhw
-.. autofunction:: topi.generic.schedule_conv3d_transpose_ncdhw
-.. autofunction:: topi.generic.schedule_reduce
-.. autofunction:: topi.generic.schedule_broadcast
-.. autofunction:: topi.generic.schedule_injective
# documentation root, use os.path.abspath to make it absolute, like shown here.
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../python/'))
-sys.path.insert(0, os.path.join(curr_path, '../topi/python'))
sys.path.insert(0, os.path.join(curr_path, '../vta/python'))
# -- General configuration ------------------------------------------------
os.environ['TVM_BUILD_DOC'] = '1'
# Version information.
import tvm
+from tvm import topi
from tvm import te
version = tvm.__version__
release = tvm.__version__
make
# let python know where to find tvm related libraries
- export PYTHONPATH=python:topi/python
+ export PYTHONPATH=python
rm -rf python/tvm/*.pyc python/tvm/*/*.pyc python/tvm/*/*/*.pyc
TVM_FFI=ctypes python -m pytest -v tests/python/unittest/test_pass_storage_rewrite.py
- ``src`` - C++ code for operator compilation and deployment runtimes.
- ``src/relay`` - Implementation of Relay, a new functional IR for deep learning frameworks.
- ``python`` - Python frontend that wraps C++ functions and objects implemented in ``src``.
-- ``topi`` - Compute definitions and backend schedules for standard neural network operators.
+- ``src/topi`` - Compute definitions and backend schedules for standard neural network operators.
Using standard Deep Learning terminology, ``src/relay`` is the component that manages a computational graph, and nodes in a graph are compiled and executed using infrastructure implemented in the rest of ``src``. ``python`` provides python bindings for the C++ API and driver code that users can use to execute compilation. Operators corresponding to each node are registered in ``src/relay/op``. Implementations of operators are in ``topi``, and they are coded in either C++ or Python.
inferbound
hybrid_script
-topi
-----
+tvm/topi
+--------
While it is possible to construct operators directly via TIR or tensor expressions (TE), doing so for each use case is tedious.
`topi` (Tensor operator inventory) provides a set of pre-defined operators (in TE or TIR) that follow the definitions of
numpy and are found in common deep learning workloads. We also provide a collection of common schedule templates to obtain performant implementations across different target platforms.
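As a minimal sketch of what this looks like after the migration (a plain TE schedule stands in for a tuned template; shapes are illustrative):

```python
import tvm
from tvm import te, topi

n = te.var("n")
A = te.placeholder((n,), name="A")
B = te.placeholder((n,), name="B")
C = topi.add(A, B)            # numpy-style broadcast add from topi
s = te.create_schedule(C.op)  # a plain TE schedule; tuned templates also exist
f = tvm.build(s, [A, B, C], "llvm")
```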
Our goal is to build the shared libraries:
-- On Linux the target library are `libtvm.so, libtvm_topi.so`
-- On macOS the target library are `libtvm.dylib, libtvm_topi.dylib`
-- On Windows the target library are `libtvm.dll, libtvm_topi.dll`
+- On Linux the target library is `libtvm.so`
+- On macOS the target library is `libtvm.dylib`
+- On Windows the target library is `libtvm.dll`
.. code:: bash
.. code:: bash
export TVM_HOME=/path/to/tvm
- export PYTHONPATH=$TVM_HOME/python:$TVM_HOME/topi/python:${PYTHONPATH}
+ export PYTHONPATH=$TVM_HOME/python:${PYTHONPATH}
Method 2
# providing --user flag may trigger error during installation in such case.
export MACOSX_DEPLOYMENT_TARGET=10.9 # This is required for mac to avoid symbol conflicts with libstdc++
cd python; python setup.py install --user; cd ..
- cd topi/python; python setup.py install --user; cd ../..
-
Python dependencies
~~~~~~~~~~~~~~~~~~~
An operator is a primitive operation, such as :code:`add` or :code:`conv2d`, not defined in the Relay
language. Operators are declared in the global operator
registry in C++. Many common operators are backed by TVM's
-Tensor Operator Inventory (`TOPI <https://github.com/apache/incubator-tvm/tree/master/topi>`__).
+Tensor Operator Inventory.
To register an operator a user must provide an implementation
of the operator, its type, and any other desired metadata.
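As a hedged illustration of the Python side of this, hooking a TOPI compute and schedule onto an already-declared operator looks roughly like the sketch below; the operator name `my_add` is hypothetical, while `register_compute` and `register_injective_schedule` appear in the hunks later in this PR:

```python
from tvm import topi
from tvm.relay.op import op as _op

def _my_add_compute(attrs, inputs, out_type):
    # Back the Relay op with TOPI's broadcast add.
    return [topi.add(inputs[0], inputs[1])]

# "my_add" must already be declared in the C++ operator registry.
_op.register_compute("my_add", _my_add_compute)
_op.register_injective_schedule("my_add")
```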
* \brief Broadcast op constructions
* \file topi/broadcast.h
*/
-#ifndef TOPI_BROADCAST_H_
-#define TOPI_BROADCAST_H_
+#ifndef TVM_TOPI_BROADCAST_H_
+#define TVM_TOPI_BROADCAST_H_
-#include <topi/detail/broadcast.h>
-#include <topi/detail/constant_utils.h>
-#include <topi/tags.h>
+#include <tvm/topi/detail/broadcast.h>
+#include <tvm/topi/detail/constant_utils.h>
+#include <tvm/topi/tags.h>
#include <algorithm>
#include <string>
+namespace tvm {
namespace topi {
/*!
TOPI_DEFINE_BCAST_OP(less_equal, { return (a <= b); });
} // namespace topi
+} // namespace tvm
-#endif // TOPI_BROADCAST_H_
+#endif // TVM_TOPI_BROADCAST_H_
* \brief External function interface to cuBLAS libraries
* \file cublas.h
*/
-#ifndef TOPI_CONTRIB_CUBLAS_H_
-#define TOPI_CONTRIB_CUBLAS_H_
+#ifndef TVM_TOPI_CONTRIB_CUBLAS_H_
+#define TVM_TOPI_CONTRIB_CUBLAS_H_
-#include <topi/detail/extern.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/detail/extern.h>
+namespace tvm {
namespace topi {
namespace contrib {
-using namespace tvm;
+
using namespace tvm::te;
using namespace topi::detail;
/*!
} // namespace contrib
} // namespace topi
+} // namespace tvm
-#endif // TOPI_CONTRIB_CUBLAS_H_
+#endif // TVM_TOPI_CONTRIB_CUBLAS_H_
* \brief External function interface to rocBLAS libraries
* \file tags.h
*/
-#ifndef TOPI_CONTRIB_ROCBLAS_H_
-#define TOPI_CONTRIB_ROCBLAS_H_
+#ifndef TVM_TOPI_CONTRIB_ROCBLAS_H_
+#define TVM_TOPI_CONTRIB_ROCBLAS_H_
#include <tvm/te/operation.h>
+#include <tvm/topi/detail/extern.h>
-#include "topi/detail/extern.h"
-
+namespace tvm {
namespace topi {
namespace contrib {
-using namespace tvm;
+
using namespace tvm::te;
/*!
* \brief Create an op that multiplies lhs and rhs with rocBLAS
} // namespace contrib
} // namespace topi
+} // namespace tvm
-#endif // TOPI_CONTRIB_ROCBLAS_H_
+#endif // TVM_TOPI_CONTRIB_ROCBLAS_H_
* \file cuda/dense.h
* \brief CUDA schedule for dense operation
*/
-#ifndef TOPI_CUDA_DENSE_H_
-#define TOPI_CUDA_DENSE_H_
-
-#include <topi/contrib/cublas.h>
-#include <topi/detail/array_utils.h>
-#include <topi/generic/extern.h>
-#include <topi/nn/dense.h>
-#include <topi/tags.h>
+#ifndef TVM_TOPI_CUDA_DENSE_H_
+#define TVM_TOPI_CUDA_DENSE_H_
+
#include <tvm/target/generic_func.h>
#include <tvm/te/operation.h>
#include <tvm/te/schedule_pass.h>
+#include <tvm/topi/contrib/cublas.h>
+#include <tvm/topi/detail/array_utils.h>
+#include <tvm/topi/generic/extern.h>
+#include <tvm/topi/nn/dense.h>
+#include <tvm/topi/tags.h>
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
namespace cuda {
} // namespace cuda
} // namespace topi
-#endif // TOPI_CUDA_DENSE_H_
+} // namespace tvm
+#endif // TVM_TOPI_CUDA_DENSE_H_
* \file cuda/injective.h
* \brief CUDA schedule for injective operations
*/
-#ifndef TOPI_CUDA_INJECTIVE_H_
-#define TOPI_CUDA_INJECTIVE_H_
+#ifndef TVM_TOPI_CUDA_INJECTIVE_H_
+#define TVM_TOPI_CUDA_INJECTIVE_H_
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
#include <tvm/target/generic_func.h>
#include <tvm/te/operation.h>
#include <tvm/te/schedule_pass.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
namespace cuda {
} // namespace cuda
} // namespace topi
-#endif // TOPI_CUDA_INJECTIVE_H_
+} // namespace tvm
+#endif // TVM_TOPI_CUDA_INJECTIVE_H_
* \file cuda/normalization.h
* \brief CUDA schedule for LRN and l2 normalization operations
*/
-#ifndef TOPI_CUDA_NORMALIZATION_H_
-#define TOPI_CUDA_NORMALIZATION_H_
+#ifndef TVM_TOPI_CUDA_NORMALIZATION_H_
+#define TVM_TOPI_CUDA_NORMALIZATION_H_
-#include <topi/tags.h>
#include <tvm/target/generic_func.h>
#include <tvm/te/operation.h>
#include <tvm/te/schedule_pass.h>
+#include <tvm/topi/tags.h>
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
namespace cuda {
/*!
} // namespace cuda
} // namespace topi
-#endif // TOPI_CUDA_NORMALIZATION_H_
+} // namespace tvm
+#endif // TVM_TOPI_CUDA_NORMALIZATION_H_
* \file cuda/pooling.h
* \brief CUDA schedule for pooling operations
*/
-#ifndef TOPI_CUDA_POOLING_H_
-#define TOPI_CUDA_POOLING_H_
+#ifndef TVM_TOPI_CUDA_POOLING_H_
+#define TVM_TOPI_CUDA_POOLING_H_
-#include <topi/detail/array_utils.h>
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
#include <tvm/target/generic_func.h>
#include <tvm/te/operation.h>
#include <tvm/te/schedule_pass.h>
+#include <tvm/topi/detail/array_utils.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
namespace cuda {
} // namespace cuda
} // namespace topi
-#endif // TOPI_CUDA_POOLING_H_
+} // namespace tvm
+#endif // TVM_TOPI_CUDA_POOLING_H_
* \file cuda/reduction.h
* \brief CUDA schedule for reduction operations
*/
-#ifndef TOPI_CUDA_REDUCTION_H_
-#define TOPI_CUDA_REDUCTION_H_
+#ifndef TVM_TOPI_CUDA_REDUCTION_H_
+#define TVM_TOPI_CUDA_REDUCTION_H_
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
#include <tvm/target/generic_func.h>
#include <tvm/te/operation.h>
#include <tvm/te/schedule_pass.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
namespace cuda {
} // namespace cuda
} // namespace topi
-#endif // TOPI_CUDA_REDUCTION_H_
+} // namespace tvm
+#endif // TVM_TOPI_CUDA_REDUCTION_H_
* \file cuda/injective.h
* \brief CUDA schedule for injective operations
*/
-#ifndef TOPI_CUDA_SOFTMAX_H_
-#define TOPI_CUDA_SOFTMAX_H_
+#ifndef TVM_TOPI_CUDA_SOFTMAX_H_
+#define TVM_TOPI_CUDA_SOFTMAX_H_
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
#include <tvm/target/generic_func.h>
#include <tvm/te/operation.h>
#include <tvm/te/schedule_pass.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
namespace cuda {
} // namespace cuda
} // namespace topi
-#endif // TOPI_CUDA_SOFTMAX_H_
+} // namespace tvm
+#endif // TVM_TOPI_CUDA_SOFTMAX_H_
* \file array_utils.h
* \brief Utility functions for handling arrays
*/
-#ifndef TOPI_DETAIL_ARRAY_UTILS_H_
-#define TOPI_DETAIL_ARRAY_UTILS_H_
+#ifndef TVM_TOPI_DETAIL_ARRAY_UTILS_H_
+#define TVM_TOPI_DETAIL_ARRAY_UTILS_H_
#include <tvm/te/operation.h>
+namespace tvm {
namespace topi {
namespace detail {
-using namespace tvm;
+
using namespace tvm::te;
/*!
} // namespace detail
} // namespace topi
-#endif // TOPI_DETAIL_ARRAY_UTILS_H_
+} // namespace tvm
+#endif // TVM_TOPI_DETAIL_ARRAY_UTILS_H_
* \brief Detail broadcast.
* \file topi/detail/broadcast.h
*/
-#ifndef TOPI_DETAIL_BROADCAST_H_
-#define TOPI_DETAIL_BROADCAST_H_
+#ifndef TVM_TOPI_DETAIL_BROADCAST_H_
+#define TVM_TOPI_DETAIL_BROADCAST_H_
-#include <topi/detail/constant_utils.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/detail/constant_utils.h>
#include <algorithm>
#include <deque>
#include <string>
+namespace tvm {
namespace topi {
namespace detail {
} // namespace detail
} // namespace topi
+} // namespace tvm
-#endif // TOPI_DETAIL_BROADCAST_H_
+#endif // TVM_TOPI_DETAIL_BROADCAST_H_
* \file constant_utils.h
* \brief Utility functions for handling constants in TVM expressions
*/
-#ifndef TOPI_DETAIL_CONSTANT_UTILS_H_
-#define TOPI_DETAIL_CONSTANT_UTILS_H_
+#ifndef TVM_TOPI_DETAIL_CONSTANT_UTILS_H_
+#define TVM_TOPI_DETAIL_CONSTANT_UTILS_H_
#include <tvm/arith/analyzer.h>
#include <tvm/te/operation.h>
#include <string>
#include <vector>
+namespace tvm {
namespace topi {
namespace detail {
-using namespace tvm;
+
using namespace tvm::te;
/*!
} // namespace detail
} // namespace topi
-#endif // TOPI_DETAIL_CONSTANT_UTILS_H_
+} // namespace tvm
+#endif // TVM_TOPI_DETAIL_CONSTANT_UTILS_H_
* \file detail/extern.h
* \brief Helpers for using external functions
*/
-#ifndef TOPI_DETAIL_EXTERN_H_
-#define TOPI_DETAIL_EXTERN_H_
+#ifndef TVM_TOPI_DETAIL_EXTERN_H_
+#define TVM_TOPI_DETAIL_EXTERN_H_
#include <tvm/te/operation.h>
#include <tvm/tir/builtin.h>
#include <string>
#include <vector>
+namespace tvm {
namespace topi {
namespace detail {
-using namespace tvm;
+
using namespace tvm::te;
/*!
} // namespace detail
} // namespace topi
-#endif // TOPI_DETAIL_EXTERN_H_
+} // namespace tvm
+#endif // TVM_TOPI_DETAIL_EXTERN_H_
* \file fuse.h
* \brief Fuse operation
*/
-#ifndef TOPI_DETAIL_FUSE_H_
-#define TOPI_DETAIL_FUSE_H_
+#ifndef TVM_TOPI_DETAIL_FUSE_H_
+#define TVM_TOPI_DETAIL_FUSE_H_
#include <tvm/te/operation.h>
+namespace tvm {
namespace topi {
namespace detail {
-using namespace tvm;
+
using namespace tvm::te;
/*!
} // namespace detail
} // namespace topi
-#endif // TOPI_DETAIL_FUSE_H_
+} // namespace tvm
+#endif // TVM_TOPI_DETAIL_FUSE_H_
* \file pad_utils.h
* \brief Padding helpers
*/
-#ifndef TOPI_DETAIL_PAD_UTILS_H_
-#define TOPI_DETAIL_PAD_UTILS_H_
+#ifndef TVM_TOPI_DETAIL_PAD_UTILS_H_
+#define TVM_TOPI_DETAIL_PAD_UTILS_H_
#include <tvm/te/operation.h>
#include <tvm/tir/expr.h>
#include <vector>
+namespace tvm {
namespace topi {
namespace detail {
-using namespace tvm;
+
using namespace tvm::te;
/*!
} // namespace detail
} // namespace topi
-#endif // TOPI_DETAIL_PAD_UTILS_H_
+} // namespace tvm
+#endif // TVM_TOPI_DETAIL_PAD_UTILS_H_
* \file ravel_unravel.h
* \brief Index ravel and unraval operations
*/
-#ifndef TOPI_DETAIL_RAVEL_UNRAVEL_H_
-#define TOPI_DETAIL_RAVEL_UNRAVEL_H_
+#ifndef TVM_TOPI_DETAIL_RAVEL_UNRAVEL_H_
+#define TVM_TOPI_DETAIL_RAVEL_UNRAVEL_H_
#include <tvm/te/operation.h>
#include <vector>
+namespace tvm {
namespace topi {
namespace detail {
-using namespace tvm;
+
using namespace tvm::te;
/*!
} // namespace detail
} // namespace topi
-#endif // TOPI_DETAIL_RAVEL_UNRAVEL_H_
+} // namespace tvm
+#endif // TVM_TOPI_DETAIL_RAVEL_UNRAVEL_H_
* \file tensor_utils.h
* \brief Utility functions for handling tensor
*/
-#ifndef TOPI_DETAIL_TENSOR_UTILS_H_
-#define TOPI_DETAIL_TENSOR_UTILS_H_
+#ifndef TVM_TOPI_DETAIL_TENSOR_UTILS_H_
+#define TVM_TOPI_DETAIL_TENSOR_UTILS_H_
#include <tvm/te/operation.h>
+namespace tvm {
namespace topi {
namespace detail {
-using namespace tvm;
+
using namespace tvm::te;
/*!
} // namespace detail
} // namespace topi
-#endif // TOPI_DETAIL_TENSOR_UTILS_H_
+} // namespace tvm
+#endif // TVM_TOPI_DETAIL_TENSOR_UTILS_H_
* \file elemwise.h
* \brief Elementwise op constructions
*/
-#ifndef TOPI_ELEMWISE_H_
-#define TOPI_ELEMWISE_H_
+#ifndef TVM_TOPI_ELEMWISE_H_
+#define TVM_TOPI_ELEMWISE_H_
-#include <topi/tags.h>
#include <tvm/tir/builtin.h>
#include <tvm/tir/expr.h>
+#include <tvm/topi/tags.h>
#include <algorithm>
#include <string>
#include "broadcast.h"
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
// Unary intrinsic operators
}
} // namespace topi
-#endif // TOPI_ELEMWISE_H_
+} // namespace tvm
+#endif // TVM_TOPI_ELEMWISE_H_
* \file generic/default.h
* \brief Generic default schedule
*/
-#ifndef TOPI_GENERIC_DEFAULT_H_
-#define TOPI_GENERIC_DEFAULT_H_
+#ifndef TVM_TOPI_GENERIC_DEFAULT_H_
+#define TVM_TOPI_GENERIC_DEFAULT_H_
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
#include <tvm/target/generic_func.h>
#include <tvm/te/operation.h>
#include <tvm/te/schedule_pass.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
namespace generic {
} // namespace generic
} // namespace topi
-#endif // TOPI_GENERIC_DEFAULT_H_
+} // namespace tvm
+#endif // TVM_TOPI_GENERIC_DEFAULT_H_
* \file generic/extern.h
* \brief Schedule for extern followed by injective ops
*/
-#ifndef TOPI_GENERIC_EXTERN_H_
-#define TOPI_GENERIC_EXTERN_H_
+#ifndef TVM_TOPI_GENERIC_EXTERN_H_
+#define TVM_TOPI_GENERIC_EXTERN_H_
-#include <topi/detail/fuse.h>
-#include <topi/generic/injective.h>
-#include <topi/tags.h>
#include <tvm/target/generic_func.h>
#include <tvm/te/operation.h>
#include <tvm/te/schedule_pass.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/generic/injective.h>
+#include <tvm/topi/tags.h>
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
namespace generic {
} // namespace generic
} // namespace topi
-#endif // TOPI_GENERIC_EXTERN_H_
+} // namespace tvm
+#endif // TVM_TOPI_GENERIC_EXTERN_H_
* \file generic/injective.h
* \brief Generic schedule for injective operations
*/
-#ifndef TOPI_GENERIC_INJECTIVE_H_
-#define TOPI_GENERIC_INJECTIVE_H_
+#ifndef TVM_TOPI_GENERIC_INJECTIVE_H_
+#define TVM_TOPI_GENERIC_INJECTIVE_H_
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
#include <tvm/target/generic_func.h>
#include <tvm/te/operation.h>
#include <tvm/te/schedule_pass.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
namespace generic {
} // namespace generic
} // namespace topi
-#endif // TOPI_GENERIC_INJECTIVE_H_
+} // namespace tvm
+#endif // TVM_TOPI_GENERIC_INJECTIVE_H_
* \brief NN op constructions
* \file topi/nn.h
*/
-#ifndef TOPI_NN_H_
-#define TOPI_NN_H_
+#ifndef TVM_TOPI_NN_H_
+#define TVM_TOPI_NN_H_
-#include <topi/detail/constant_utils.h>
-#include <topi/tags.h>
#include <tvm/arith/analyzer.h>
#include <tvm/te/operation.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/op.h>
+#include <tvm/topi/detail/constant_utils.h>
+#include <tvm/topi/tags.h>
#include <algorithm>
#include <string>
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
/*!
}
} // namespace topi
-#endif // TOPI_NN_H_
+} // namespace tvm
+#endif // TVM_TOPI_NN_H_
* \brief Batch matmul op constructions
* \file nn/batch_matmul.h
*/
-#ifndef TOPI_NN_BATCH_MATMUL_H_
-#define TOPI_NN_BATCH_MATMUL_H_
+#ifndef TVM_TOPI_NN_BATCH_MATMUL_H_
+#define TVM_TOPI_NN_BATCH_MATMUL_H_
-#include <topi/tags.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/tags.h>
#include <string>
+namespace tvm {
namespace topi {
namespace nn {
-using namespace tvm;
+
using namespace tvm::te;
/*!
} // namespace nn
} // namespace topi
+} // namespace tvm
-#endif // TOPI_NN_BATCH_MATMUL_H_
+#endif // TVM_TOPI_NN_BATCH_MATMUL_H_
* \brief bias_add op constructions
* \file nn/bias_add.h
*/
-#ifndef TOPI_NN_BIAS_ADD_H_
-#define TOPI_NN_BIAS_ADD_H_
+#ifndef TVM_TOPI_NN_BIAS_ADD_H_
+#define TVM_TOPI_NN_BIAS_ADD_H_
-#include <topi/broadcast.h>
-#include <topi/tags.h>
-#include <topi/transform.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/broadcast.h>
+#include <tvm/topi/tags.h>
+#include <tvm/topi/transform.h>
#include <string>
+namespace tvm {
namespace topi {
namespace nn {
}
} // namespace nn
} // namespace topi
-#endif // TOPI_NN_BIAS_ADD_H_
+} // namespace tvm
+#endif // TVM_TOPI_NN_BIAS_ADD_H_
* \brief Binary op constructions
* \file nn/bnn.h
*/
-#ifndef TOPI_NN_BNN_H_
-#define TOPI_NN_BNN_H_
+#ifndef TVM_TOPI_NN_BNN_H_
+#define TVM_TOPI_NN_BNN_H_
-#include <topi/detail/constant_utils.h>
-#include <topi/tags.h>
#include <tvm/arith/analyzer.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/detail/constant_utils.h>
+#include <tvm/topi/tags.h>
#include <string>
+namespace tvm {
namespace topi {
namespace nn {
-using namespace tvm;
+
using namespace tvm::te;
/*!
} // namespace nn
} // namespace topi
-#endif // TOPI_NN_BNN_H_
+} // namespace tvm
+#endif // TVM_TOPI_NN_BNN_H_
* \brief Dense op constructions
* \file nn/dense.h
*/
-#ifndef TOPI_NN_DENSE_H_
-#define TOPI_NN_DENSE_H_
+#ifndef TVM_TOPI_NN_DENSE_H_
+#define TVM_TOPI_NN_DENSE_H_
-#include <topi/tags.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/tags.h>
#include <string>
+namespace tvm {
namespace topi {
namespace nn {
-using namespace tvm;
+
using namespace tvm::te;
/*!
} // namespace nn
} // namespace topi
-#endif // TOPI_NN_DENSE_H_
+} // namespace tvm
+#endif // TVM_TOPI_NN_DENSE_H_
* \brief Dilate op constructions
* \file nn/dilate.h
*/
-#ifndef TOPI_NN_DILATE_H_
-#define TOPI_NN_DILATE_H_
+#ifndef TVM_TOPI_NN_DILATE_H_
+#define TVM_TOPI_NN_DILATE_H_
-#include <topi/tags.h>
#include <tvm/arith/analyzer.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/tags.h>
#include <string>
+namespace tvm {
namespace topi {
namespace nn {
-using namespace tvm;
+
using namespace tvm::te;
/*!
} // namespace nn
} // namespace topi
-#endif // TOPI_NN_DILATE_H_
+} // namespace tvm
+#endif // TVM_TOPI_NN_DILATE_H_
* \brief Softmax op constructions
* \file nn/flatten.h
*/
-#ifndef TOPI_NN_FLATTEN_H_
-#define TOPI_NN_FLATTEN_H_
+#ifndef TVM_TOPI_NN_FLATTEN_H_
+#define TVM_TOPI_NN_FLATTEN_H_
-#include <topi/detail/constant_utils.h>
-#include <topi/tags.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/detail/constant_utils.h>
+#include <tvm/topi/tags.h>
#include <string>
#include <vector>
+namespace tvm {
namespace topi {
namespace nn {
-using namespace tvm;
+
using namespace tvm::te;
/*!
} // namespace nn
} // namespace topi
-#endif // TOPI_NN_FLATTEN_H_
+} // namespace tvm
+#endif // TVM_TOPI_NN_FLATTEN_H_
* \brief local response normalization op constructions
* \file nn/local_response_norm.h
*/
-#ifndef TOPI_NN_LOCAL_RESPONSE_NORM_H_
-#define TOPI_NN_LOCAL_RESPONSE_NORM_H_
+#ifndef TVM_TOPI_NN_LOCAL_RESPONSE_NORM_H_
+#define TVM_TOPI_NN_LOCAL_RESPONSE_NORM_H_
-#include <topi/tags.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/tags.h>
#include <string>
+namespace tvm {
namespace topi {
namespace nn {
-using namespace tvm;
+
using namespace tvm::te;
/*!
}
} // namespace nn
} // namespace topi
-#endif // TOPI_NN_LOCAL_RESPONSE_NORM_H_
+} // namespace tvm
+#endif // TVM_TOPI_NN_LOCAL_RESPONSE_NORM_H_
* \brief Mapping op constructions
* \file nn/mapping.h
*/
-#ifndef TOPI_NN_MAPPING_H_
-#define TOPI_NN_MAPPING_H_
+#ifndef TVM_TOPI_NN_MAPPING_H_
+#define TVM_TOPI_NN_MAPPING_H_
-#include <topi/tags.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/tags.h>
#include <string>
+namespace tvm {
namespace topi {
namespace nn {
-using namespace tvm;
+
using namespace tvm::te;
/*!
} // namespace nn
} // namespace topi
-#endif // TOPI_NN_MAPPING_H_
+} // namespace tvm
+#endif // TVM_TOPI_NN_MAPPING_H_
* \brief Pooling op constructions
* \file nn/pooling.h
*/
-#ifndef TOPI_NN_POOLING_H_
-#define TOPI_NN_POOLING_H_
+#ifndef TVM_TOPI_NN_POOLING_H_
+#define TVM_TOPI_NN_POOLING_H_
-#include <topi/detail/pad_utils.h>
-#include <topi/nn.h>
-#include <topi/reduction.h>
-#include <topi/tags.h>
#include <tvm/arith/analyzer.h>
+#include <tvm/topi/detail/pad_utils.h>
+#include <tvm/topi/nn.h>
+#include <tvm/topi/reduction.h>
+#include <tvm/topi/tags.h>
#include <algorithm>
#include <string>
#include <vector>
+namespace tvm {
namespace topi {
namespace nn {
-using namespace tvm;
+
using namespace tvm::te;
/*! \brief Pooling type */
} // namespace nn
} // namespace topi
-#endif // TOPI_NN_POOLING_H_
+} // namespace tvm
+#endif // TVM_TOPI_NN_POOLING_H_
* \brief Softmax op constructions
* \file nn/softmax.h
*/
-#ifndef TOPI_NN_SOFTMAX_H_
-#define TOPI_NN_SOFTMAX_H_
+#ifndef TVM_TOPI_NN_SOFTMAX_H_
+#define TVM_TOPI_NN_SOFTMAX_H_
-#include <topi/reduction.h>
-#include <topi/tags.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/reduction.h>
+#include <tvm/topi/tags.h>
#include <algorithm>
#include <string>
+namespace tvm {
namespace topi {
namespace nn {
-using namespace tvm;
+
using namespace tvm::te;
/*!
} // namespace nn
} // namespace topi
-#endif // TOPI_NN_SOFTMAX_H_
+} // namespace tvm
+#endif // TVM_TOPI_NN_SOFTMAX_H_
* \file topi/reduction.h
* \brief Reduction op constructors
*/
-#ifndef TOPI_REDUCTION_H_
-#define TOPI_REDUCTION_H_
-
-#include <topi/broadcast.h>
-#include <topi/detail/constant_utils.h>
-#include <topi/detail/ravel_unravel.h>
-#include <topi/elemwise.h>
-#include <topi/tags.h>
-#include <topi/transform.h>
+#ifndef TVM_TOPI_REDUCTION_H_
+#define TVM_TOPI_REDUCTION_H_
+
#include <tvm/te/operation.h>
+#include <tvm/topi/broadcast.h>
+#include <tvm/topi/detail/constant_utils.h>
+#include <tvm/topi/detail/ravel_unravel.h>
+#include <tvm/topi/elemwise.h>
+#include <tvm/topi/tags.h>
+#include <tvm/topi/transform.h>
#include <algorithm>
#include <iterator>
#include <string>
#include <vector>
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
/*! \brief The operation to use for CommReduce */
}
} // namespace topi
-#endif // TOPI_REDUCTION_H_
+} // namespace tvm
+#endif // TVM_TOPI_REDUCTION_H_
* \file rocm/dense.h
* \brief rocm schedule for dense operation
*/
-#ifndef TOPI_ROCM_DENSE_H_
-#define TOPI_ROCM_DENSE_H_
+#ifndef TVM_TOPI_ROCM_DENSE_H_
+#define TVM_TOPI_ROCM_DENSE_H_
-#include <topi/tags.h>
#include <tvm/target/generic_func.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/contrib/rocblas.h>
+#include <tvm/topi/cuda/dense.h>
+#include <tvm/topi/detail/array_utils.h>
+#include <tvm/topi/generic/extern.h>
+#include <tvm/topi/nn/dense.h>
+#include <tvm/topi/tags.h>
-#include "topi/contrib/rocblas.h"
-#include "topi/cuda/dense.h"
-#include "topi/detail/array_utils.h"
-#include "topi/generic/extern.h"
-#include "topi/nn/dense.h"
-
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
namespace rocm {
} // namespace rocm
} // namespace topi
-#endif // TOPI_ROCM_DENSE_H_
+} // namespace tvm
+#endif // TVM_TOPI_ROCM_DENSE_H_
* \file rocm/injective.h
* \brief rocm schedule for injective operations
*/
-#ifndef TOPI_ROCM_INJECTIVE_H_
-#define TOPI_ROCM_INJECTIVE_H_
+#ifndef TVM_TOPI_ROCM_INJECTIVE_H_
+#define TVM_TOPI_ROCM_INJECTIVE_H_
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
#include <tvm/target/generic_func.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/cuda/injective.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
-#include "topi/cuda/injective.h"
-
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
namespace rocm {
} // namespace rocm
} // namespace topi
-#endif // TOPI_ROCM_INJECTIVE_H_
+} // namespace tvm
+#endif // TVM_TOPI_ROCM_INJECTIVE_H_
* \file rocm/normalization.h
* \brief rocm schedule for LRN and l2 normalization operations
*/
-#ifndef TOPI_ROCM_NORMALIZATION_H_
-#define TOPI_ROCM_NORMALIZATION_H_
+#ifndef TVM_TOPI_ROCM_NORMALIZATION_H_
+#define TVM_TOPI_ROCM_NORMALIZATION_H_
-#include <topi/tags.h>
#include <tvm/target/generic_func.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/tags.h>
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
namespace rocm {
/*!
} // namespace rocm
} // namespace topi
-#endif // TOPI_ROCM_NORMALIZATION_H_
+} // namespace tvm
+#endif // TVM_TOPI_ROCM_NORMALIZATION_H_
* \file rocm/pooling.h
* \brief rocm schedule for pooling operations
*/
-#ifndef TOPI_ROCM_POOLING_H_
-#define TOPI_ROCM_POOLING_H_
+#ifndef TVM_TOPI_ROCM_POOLING_H_
+#define TVM_TOPI_ROCM_POOLING_H_
-#include <topi/cuda/pooling.h>
-#include <topi/detail/array_utils.h>
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
#include <tvm/target/generic_func.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/cuda/pooling.h>
+#include <tvm/topi/detail/array_utils.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
namespace rocm {
} // namespace rocm
} // namespace topi
-#endif // TOPI_ROCM_POOLING_H_
+} // namespace tvm
+#endif // TVM_TOPI_ROCM_POOLING_H_
* \file rocm/reduction.h
* \brief rocm schedule for reduction operations
*/
-#ifndef TOPI_ROCM_REDUCTION_H_
-#define TOPI_ROCM_REDUCTION_H_
+#ifndef TVM_TOPI_ROCM_REDUCTION_H_
+#define TVM_TOPI_ROCM_REDUCTION_H_
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
#include <tvm/target/generic_func.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/cuda/reduction.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
-#include "topi/cuda/reduction.h"
-
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
namespace rocm {
} // namespace rocm
} // namespace topi
-#endif // TOPI_ROCM_REDUCTION_H_
+} // namespace tvm
+#endif // TVM_TOPI_ROCM_REDUCTION_H_
* \file rocm/injective.h
* \brief ROCM schedule for injective operations
*/
-#ifndef TOPI_ROCM_SOFTMAX_H_
-#define TOPI_ROCM_SOFTMAX_H_
+#ifndef TVM_TOPI_ROCM_SOFTMAX_H_
+#define TVM_TOPI_ROCM_SOFTMAX_H_
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
#include <tvm/target/generic_func.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/cuda/softmax.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
-#include "topi/cuda/softmax.h"
-
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
namespace rocm {
} // namespace rocm
} // namespace topi
-#endif // TOPI_ROCM_SOFTMAX_H_
+} // namespace tvm
+#endif // TVM_TOPI_ROCM_SOFTMAX_H_
* \brief Tag definitions
* \file tags.h
*/
-#ifndef TOPI_TAGS_H_
-#define TOPI_TAGS_H_
+#ifndef TVM_TOPI_TAGS_H_
+#define TVM_TOPI_TAGS_H_
#include <string>
+namespace tvm {
namespace topi {
constexpr auto kElementWise = "elemwise";
}
} // namespace topi
+} // namespace tvm
-#endif // TOPI_TAGS_H_
+#endif // TVM_TOPI_TAGS_H_
* \file topi/transform.h
* \brief Transform op constructors
*/
-#ifndef TOPI_TRANSFORM_H_
-#define TOPI_TRANSFORM_H_
+#ifndef TVM_TOPI_TRANSFORM_H_
+#define TVM_TOPI_TRANSFORM_H_
-#include <topi/detail/constant_utils.h>
-#include <topi/detail/ravel_unravel.h>
-#include <topi/detail/tensor_utils.h>
-#include <topi/tags.h>
#include <tvm/te/operation.h>
#include <tvm/tir/data_layout.h>
+#include <tvm/topi/detail/constant_utils.h>
+#include <tvm/topi/detail/ravel_unravel.h>
+#include <tvm/topi/detail/tensor_utils.h>
+#include <tvm/topi/tags.h>
#include <algorithm>
#include <iterator>
#include <unordered_set>
#include <vector>
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
using namespace topi::detail;
}
} // namespace topi
-#endif // TOPI_TRANSFORM_H_
+} // namespace tvm
+#endif // TVM_TOPI_TRANSFORM_H_
* \brief Topi utility function
* \file topi/util.h
*/
-#ifndef TOPI_UTIL_H_
-#define TOPI_UTIL_H_
+#ifndef TVM_TOPI_UTIL_H_
+#define TVM_TOPI_UTIL_H_
#include <tvm/ir/expr.h>
#include <tvm/runtime/packed_func.h>
+namespace tvm {
namespace topi {
-using namespace tvm;
using namespace tvm::runtime;
/*! \brief Canonicalize an argument that may be Array<Expr> or int to Array<Expr> */
}
}
} // namespace topi
-#endif // TOPI_UTIL_H_
+} // namespace tvm
+#endif // TVM_TOPI_UTIL_H_
* \brief Reorg op constructions
* \file vision/reorg.h
*/
-#ifndef TOPI_VISION_REORG_H_
-#define TOPI_VISION_REORG_H_
+#ifndef TVM_TOPI_VISION_REORG_H_
+#define TVM_TOPI_VISION_REORG_H_
-#include <topi/detail/constant_utils.h>
-#include <topi/reduction.h>
-#include <topi/tags.h>
-#include <topi/transform.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/detail/constant_utils.h>
+#include <tvm/topi/reduction.h>
+#include <tvm/topi/tags.h>
+#include <tvm/topi/transform.h>
#include <algorithm>
#include <string>
+namespace tvm {
namespace topi {
namespace vision {
-using namespace tvm;
+
using namespace tvm::te;
/*!
}
} // namespace vision
} // namespace topi
-#endif // TOPI_VISION_REORG_H_
+} // namespace tvm
+#endif // TVM_TOPI_VISION_REORG_H_
* \file x86/bnn.h
* \brief x86 schedule for binary operations
*/
-#ifndef TOPI_X86_BNN_H_
-#define TOPI_X86_BNN_H_
+#ifndef TVM_TOPI_X86_BNN_H_
+#define TVM_TOPI_X86_BNN_H_
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
#include <tvm/target/generic_func.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
namespace x86 {
} // namespace x86
} // namespace topi
-#endif // TOPI_X86_BNN_H_
+} // namespace tvm
+#endif // TVM_TOPI_X86_BNN_H_
* \file x86/default.h
* \brief default x86 schedule
*/
-#ifndef TOPI_X86_DEFAULT_H_
-#define TOPI_X86_DEFAULT_H_
+#ifndef TVM_TOPI_X86_DEFAULT_H_
+#define TVM_TOPI_X86_DEFAULT_H_
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
#include <tvm/target/generic_func.h>
#include <tvm/te/operation.h>
#include <tvm/te/schedule_pass.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
namespace x86 {
} // namespace x86
} // namespace topi
-#endif // TOPI_X86_DEFAULT_H_
+} // namespace tvm
+#endif // TVM_TOPI_X86_DEFAULT_H_
* \file x86/injective.h
* \brief x86 schedule for injective ops
*/
-#ifndef TOPI_X86_INJECTIVE_H_
-#define TOPI_X86_INJECTIVE_H_
+#ifndef TVM_TOPI_X86_INJECTIVE_H_
+#define TVM_TOPI_X86_INJECTIVE_H_
-#include <topi/detail/fuse.h>
-#include <topi/tags.h>
#include <tvm/target/generic_func.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/detail/fuse.h>
+#include <tvm/topi/tags.h>
+namespace tvm {
namespace topi {
-using namespace tvm;
+
using namespace tvm::te;
namespace x86 {
} // namespace x86
} // namespace topi
-#endif // TOPI_X86_INJECTIVE_H_
+} // namespace tvm
+#endif // TVM_TOPI_X86_INJECTIVE_H_
from abc import abstractmethod
import numpy as np
-import topi
+from tvm import topi
import tvm
from tvm import te
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
- import topi
+ from tvm import topi
env = TaskExtractEnv.get()
import tvm
from tvm.ir import IRModule
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
from .. import expr as _expr
from .. import function as _function
from tvm.ir import IRModule
from tvm import relay
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from tvm.ir import IRModule
from tvm.relay.prelude import Prelude, StaticTensorArrayOps, get_tensor_array_shape
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
from .. import analysis
from .. import expr as _expr
from tvm.runtime import convert
from tvm.te.hybrid import script
-from topi.util import get_const_int, get_const_tuple
+from tvm.topi.util import get_const_int, get_const_tuple
from . import op as _reg
_reg.register_reduce_schedule("argmax")
"""Backend compiler related feature registration"""
from tvm.te.hybrid import script
-import topi
+from tvm import topi
from .op import register_compute, register_shape_func
from .op import register_broadcast_schedule, register_injective_schedule
"""Backend compiler related feature registration"""
from __future__ import absolute_import
-from topi.nn.util import get_pad_tuple
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
from ..expr import Tuple, TupleGetItem, const
from . import nn as _nn
from tvm import te
from tvm.te.hybrid import script
from tvm.runtime import convert
-import topi
-from topi.util import get_const_int, get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_int, get_const_tuple
from . import op as _reg
from . import strategy
from .op import OpPattern
#pylint: disable=invalid-name, unused-argument, len-as-condition
"""Backend compiler related feature registration for dynamic ops"""
-import topi
+from tvm import topi
from ..op import register_shape_func, register_compute
from ..op import register_broadcast_schedule
from tvm.te.hybrid import script
from tvm.runtime import convert
-import topi
-from topi.util import get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
from .. import op as reg
from .. import strategy
from ..op import OpPattern
This operator takes data as input and performs 3D scaling to the given scale factor.
In the default case, where the data_layout is `NCDHW`
- with data of shape (n, c, d, h, w)
- out will have a shape (n, c, size[0], size[1], size[2])
+ with data of shape `(n, c, d, h, w)`
+ out will have a shape `(n, c, size[0], size[1], size[2])`
method indicates the algorithm to be used while computing the output value,
and can be one of ("trilinear", "nearest_neighbor").
"""Backend compiler related feature registration"""
from __future__ import absolute_import
-import topi
-from topi.util import get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
from tvm.runtime import convert
from tvm.te.hybrid import script
.. math::
-    `y = x > 0 ? x : alpha * x`
+    y = \begin{cases} x & \text{if } x > 0 \\ \alpha x & \text{otherwise} \end{cases}
Parameters
----------
bit_axis=2,
pack_type="uint32",
name="BitPack"):
- r"""Tensor packing for bitserial operations.
+ """Tensor packing for bitserial operations.
+
The values along the input tensor's pack_axis are quantized
- and packed together into the specified pack_type in a new
- bit axis.
+ and packed together into the specified pack_type in a new bit axis.
- For example, consider bitpacking with data to be a tensor with shape [1, 64, 128, 128],
+ For example, consider bitpacking with data to be a tensor with shape `[1, 64, 128, 128]`,
pack_axis=1, bit_axis=4, pack_type=uint8, and bits=2. The output in this case will
- be of shape [1, 8, 128, 128, 2]. The dimension of axis 1 has been reduced by a factor
+ be of shape `[1, 8, 128, 128, 2]`. The dimension of axis 1 has been reduced by a factor
of 8 since each value is packed into an 8-bit uint8. Axis 4 is now two bitplanes
representing the quantized value of the incoming data. The output tensor is now
ready to be used in a bitserial operation.
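A hedged sketch of the worked example above, assuming this docstring belongs to `relay.nn.bitpack` (the values mirror the text):

```python
from tvm import relay

data = relay.var("data", shape=(1, 64, 128, 128), dtype="uint8")
packed = relay.nn.bitpack(data, bits=2, pack_axis=1, bit_axis=4,
                          pack_type="uint8")
# Resulting shape: (1, 8, 128, 128, 2) -- axis 1 shrinks by the 8-bit
# pack factor, and a new 2-wide bit axis appears at position 4.
```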
import re
import logging
-import topi
+from tvm import topi
from ....target import arm_isa
from .generic import *
from .. import op as _op
"""Definition of bifrost operator strategy."""
# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
import re
-import topi
+from tvm import topi
from .generic import *
from .. import op as _op
# under the License.
"""Definition of CUDA/GPU operator strategy."""
# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
-import topi
+from tvm import topi
import tvm
from tvm.te import SpecializedCondition
from tvm.contrib import nvcc
import logging
import re
-import topi
-from topi.util import get_const_int, get_const_float, get_const_tuple, get_float_tuple
+from tvm import topi
+from tvm.topi.util import get_const_int, get_const_float, get_const_tuple, get_float_tuple
from .. import op as _op
from ....target import generic_func, override_native_generic_func
# under the License.
"""Definition of HLS operator strategy."""
# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
-import topi
+from tvm import topi
from .generic import *
from .. import op as _op
# under the License.
"""Definition of x86 operator strategy."""
# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
-import topi
+from tvm import topi
from .generic import *
from .. import op as _op
"""Definition of mali operator strategy."""
# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
import re
-import topi
+from tvm import topi
from .generic import *
from .. import op as _op
# under the License.
"""Definition of ROCm operator strategy."""
# pylint: disable=invalid-name,unused-argument,unused-wildcard-import,wildcard-import
-import topi
+from tvm import topi
from .generic import *
from .. import op as _op
import logging
import re
-import topi
+from tvm import topi
from tvm.te import SpecializedCondition
from .generic import *
from .. import op as _op
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Faster R-CNN and Mask R-CNN operations."""
-import topi
-from topi.util import get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
from .. import op as reg
from .. import strategy
from ..op import OpPattern
"""Definition of vision ops"""
from __future__ import absolute_import
-import topi
+from tvm import topi
from tvm.te.hybrid import script
from .. import op as reg
from .. import strategy
#pylint: disable=unused-argument,inconsistent-return-statements
"""Internal module for registering attribute for annotation."""
import warnings
-import topi
+from tvm import topi
import tvm._ffi
from tvm.relay.op import op as _reg
from .. import expr as _expr
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-"""Internal utilities for parsing Python subset to HalideIR"""
+"""Internal utilities for parsing Python subset to TIR"""
import ast
import inspect
Some of the schedule functions may have been specially optimized for a
specific workload.
"""
-from __future__ import absolute_import as _abs
-
from tvm._ffi.libinfo import __version__
# Ensure C++ schedules get registered first, so python schedules can
import tvm
from tvm import te
from tvm import autotvm
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
from .. import tag
from .bitserial_conv2d import _intrin_popcount
from ..nn.pad import pad
"""GEMM Convolution schedule on ARM"""
import tvm
from tvm import te
-from topi import nn
+from tvm.topi import nn
from ..util import get_const_tuple
from ..nn.util import get_pad_tuple
from .tensor_intrin import gemv_quantized, gemv_quantized_impl
import tvm
from tvm import autotvm
from tvm.autotvm.task import deserialize_args
-from topi.nn.conv2d import conv2d_nchw, conv2d_nhwc
-from topi.util import get_const_tuple, get_const_int, traverse_inline
+from tvm.topi.nn.conv2d import conv2d_nchw, conv2d_nhwc
+from tvm.topi.util import get_const_tuple, get_const_int, traverse_inline
def conv2d_direct(*args, **kwargs):
"""Schedule function for directly-scheduled conv2d."""
from tvm import autotvm
from tvm.autotvm.task import deserialize_args
from tvm import te
-from topi.util import simplify, traverse_inline
-from topi.nn.pad import pad
-from topi.nn.util import get_pad_tuple
+from tvm.topi.util import simplify, traverse_inline
+from tvm.topi.nn.pad import pad
+from tvm.topi.nn.util import get_pad_tuple
from ..micro_kernel.gemm import (
intrin_gemm_MxKxN, gemm_MxKxN_impl,
"""FFI for CUDA TOPI ops and schedules"""
import tvm._ffi
-tvm._ffi._init_api("topi.cuda", "topi.cpp.cuda")
+tvm._ffi._init_api("topi.cuda", "tvm.topi.cpp.cuda")
"""FFI for generic TOPI ops and schedules"""
import tvm._ffi
-tvm._ffi._init_api("topi.generic", "topi.cpp.generic")
+tvm._ffi._init_api("topi.generic", "tvm.topi.cpp.generic")
--- /dev/null
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""Load Lib for C++ TOPI ops and schedules"""
+import tvm._ffi
+
+tvm._ffi._init_api("topi", "tvm.topi.cpp")
"""FFI for NN TOPI ops and schedules"""
import tvm._ffi
-tvm._ffi._init_api("topi.nn", "topi.cpp.nn")
+tvm._ffi._init_api("topi.nn", "tvm.topi.cpp.nn")
"""FFI for Rocm TOPI ops and schedules"""
import tvm._ffi
-tvm._ffi._init_api("topi.rocm", "topi.cpp.rocm")
+tvm._ffi._init_api("topi.rocm", "tvm.topi.cpp.rocm")
"""FFI for TOPI utility functions"""
import tvm._ffi
-tvm._ffi._init_api("topi.util", "topi.cpp.util")
+tvm._ffi._init_api("topi.util", "tvm.topi.cpp.util")
from . import yolo
-tvm._ffi._init_api("topi.vision", "topi.cpp.vision")
+tvm._ffi._init_api("topi.vision", "tvm.topi.cpp.vision")
"""FFI for Yolo TOPI ops and schedules"""
import tvm._ffi
-tvm._ffi._init_api("topi.vision.yolo", "topi.cpp.vision.yolo")
+tvm._ffi._init_api("topi.vision.yolo", "tvm.topi.cpp.vision.yolo")
"""FFI for x86 TOPI ops and schedules"""
import tvm._ffi
-tvm._ffi._init_api("topi.x86", "topi.cpp.x86")
+tvm._ffi._init_api("topi.x86", "tvm.topi.cpp.x86")
from tvm import te
from tvm.tir import if_then_else, exp
-import topi
+from tvm import topi
from ..nms import non_max_suppression
"""Dilation2D operators"""
from __future__ import absolute_import as _abs
from tvm import te
-from topi.util import simplify
+from tvm.topi.util import simplify
from ..nn.pad import pad
from ..nn.util import get_pad_tuple
from __future__ import absolute_import
import tvm
from tvm import te
-from topi.util import nchw_pack_layout, nchw_xc_layout
+from tvm.topi.util import nchw_pack_layout, nchw_xc_layout
from .. import tag
def get_2d_indices(indices, layout='NCHW'):
def resize3d(data, size, layout="NCDHW", method="nearest_neighbor",
coordinate_transformation_mode="align_corners", out_dtype=None):
"""Perform resize operation on the data.
+
Parameters
----------
inputs: tvm.te.Tensor
Method to be used for resizing.
out_dtype: string, optional
Type to return. If left None will be same as input type.
+
Returns
-------
output : tvm.te.Tensor
from __future__ import absolute_import
import tvm
from tvm import te
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
from .bitserial_util import bitpack
def bitserial_dense(data, weight, data_bits, weight_bits, pack_dtype='uint32',
import numpy as np
import tvm
from tvm import te
-from topi.transform import concatenate
+from tvm.topi.transform import concatenate
from ..util import get_const_int
def bitpack(data, bits, pack_axis, bit_axis, pack_type, name="QuantizeInput"):
"""Packs data into format necessary for bitserial computation
+
+ Parameters
+ ----------
pack_axis : int
index of the axis to pack in data
bit_axis : int
- index of axis to place bit axis in resulting packed data"""
+ index of axis to place bit axis in resulting packed data
+ """
ishape = data.shape
n = len(ishape)
if pack_type == 'uint8':
def conv2d_winograd_nnpack_weight_transform(kernel, convolution_algorithm, out_dtype):
"""Weight transformation for winograd
- Parameters
+
+ Parameters
----------
kernel: Tensor
The raw kernel tensor with layout "NCHW". Only 3x3 kernel is supported for now.
convolution_algorithm: int
The convolution algorithm for Winograd NNPACK.
- Returns
+
+ Returns
-------
output : tvm.te.Tensor
4-D with shape [alpha, alpha, CO, CI]
def unpack_NCHWc_to_nchw(packed_out, out_dtype):
"""Unpack conv2d_NCHWc output from layout NCHWc to NCHW
-    Parameters
-    -----------
+
+    Parameters
+    ----------
packed_out : tvm.te.Tensor
The output tensor of conv2d_NCHWc.
@tvm.te.tag_scope(tag=tag.BROADCAST)
def prelu(x, slope, axis=1):
- """ PReLU.
+ """PReLU.
It accepts two arguments: an input ``x`` and a weight array ``W``
and computes the output as :math:`y = PReLU(x) = x > 0 ? x : W * x`,
where :math:`*` is an elementwise multiplication for each sample in the
batch.
- Arguments:
+
+ Parameters
+ ----------
x : tvm.te.Tensor
Input argument.
axis : int
The axis where the channel data needs to be applied
- Returns:
+ Returns
+ -------
y : tvm.te.Tensor
The result.
- Links:
- [http://arxiv.org/pdf/1502.01852v1.pdf]
+ Links
+ -----
+ [http://arxiv.org/pdf/1502.01852v1.pdf]
"""
assert len(slope.shape) == 1
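As a sanity check of the formula above, the same computation in numpy (a hypothetical check, not part of the patch):

```python
# PReLU(x) = x > 0 ? x : W * x, with one slope per channel (axis=1, NCHW).
import numpy as np

x = np.random.randn(1, 2, 3, 3)            # NCHW input
w = np.array([0.25, 0.1])                  # learned slope per channel
y = np.where(x > 0, x, w[None, :, None, None] * x)
```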
# specific language governing permissions and limitations
# under the License.
"""TVM operator upsampling compute."""
-import topi
+from tvm import topi
from tvm import te
from ..util import simplify
"""Bilinear Scale in python"""
import math
import numpy as np
-from topi.util import nchw_pack_layout
+from tvm.topi.util import nchw_pack_layout
def bilinear_resize_python(image, out_size, layout, coordinate_transformation_mode="align_corners"):
""" Bilinear scaling using python"""
"""Common utility for topi test"""
import tvm
-import topi
+from tvm import topi
_injective_schedule = {
"generic": topi.generic.schedule_injective,
# pylint: disable=unused-variable, invalid-name
"""1D convolution in python"""
import numpy as np
-from topi.nn.util import get_pad_tuple1d
+from tvm.topi.nn.util import get_pad_tuple1d
def dilate_np(x, dilation):
"""Transposed 1D convolution in python"""
import numpy as np
import scipy
-import topi
-from topi.nn.util import get_pad_tuple1d
+import tvm.topi.testing
+from tvm.topi.nn.util import get_pad_tuple1d
def conv1d_transpose_ncw_python(a_np, w_np, stride, padding, output_padding):
"""Transposed 1D convolution operator in NCW layout.
assert opad < stride_w
fpad_left, fpad_right = get_pad_tuple1d(padding, filter_w)
# dilate stage
- dilated_a_np = topi.testing.dilate_python(a_np, [1, 1, stride_w])
+ dilated_a_np = tvm.topi.testing.dilate_python(a_np, [1, 1, stride_w])
# padding stage
bpad_left = filter_w - 1 - fpad_left
bpad_right = filter_w - 1 - fpad_right + opad
"""Convolution in python"""
import numpy as np
import scipy.signal
-from topi.nn.util import get_pad_tuple
+from tvm.topi.nn.util import get_pad_tuple
def conv2d_hwcn_python(a_np, w_np, stride, padding):
"""Convolution in python"""
import numpy as np
import scipy.signal
-from topi.nn.util import get_pad_tuple
+from tvm.topi.nn.util import get_pad_tuple
def _conv2d_nchw_python(a_np, w_np, stride, padding):
"""Convolution in python"""
import numpy as np
import scipy.signal
-from topi.nn.util import get_pad_tuple
+from tvm.topi.nn.util import get_pad_tuple
def _conv2d_nhwc_python(a_np, w_np, stride, padding):
"""Transposed convolution in python"""
import numpy as np
import scipy
-import topi
-from topi.nn.util import get_pad_tuple
+import tvm.topi.testing
+from tvm.topi.nn.util import get_pad_tuple
def conv2d_transpose_nchw_python(a_np, w_np, stride, padding, output_padding):
opad_h, opad_w = output_padding
assert opad_h < stride_h and opad_w < stride_w
# dilate stage
- dilated_a_np = topi.testing.dilate_python(a_np, [1, 1, stride_h, stride_w])
+ dilated_a_np = tvm.topi.testing.dilate_python(a_np, [1, 1, stride_h, stride_w])
# padding stage
fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w))
bpad_top = filter_h - 1 - fpad_top
"""Convolution 3D in python"""
import numpy as np
import scipy.signal
-from topi.nn.util import get_pad_tuple3d
+from tvm.topi.nn.util import get_pad_tuple3d
def _conv3d_ncdhw_python(a_np, w_np, stride, padding):
"""Convolution 3D in python"""
import numpy as np
import scipy.signal
-from topi.nn.util import get_pad_tuple3d
+from tvm.topi.nn.util import get_pad_tuple3d
def conv3d_ndhwc_python(a_np, w_np, stride, padding):
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals, too-many-branches
"""Convolution 3D transpose in python"""
import numpy as np
-import topi
-from topi.nn.util import get_pad_tuple3d
+import tvm.topi.testing
+from tvm.topi.nn.util import get_pad_tuple3d
def conv3d_transpose_ncdhw_python(a_np, w_np, stride, padding):
stride_d, stride_h, stride_w = stride
# dilate stage
- dilated_a_np = topi.testing.dilate_python(a_np, [1, 1, stride_d, stride_h, stride_w])
+ dilated_a_np = tvm.topi.testing.dilate_python(a_np, [1, 1, stride_d, stride_h, stride_w])
# padding stage
fpad_front, fpad_top, fpad_left, fpad_back, fpad_bottom, fpad_right = get_pad_tuple3d(
out_w = (in_w - 1) * stride_w - fpad_left - fpad_right + filter_w
w_np = np.flip(w_np, axis=[2, 3, 4]).transpose((1, 0, 2, 3, 4))
- b_np = topi.testing.conv3d_ncdhw_python(padded_a_np, w_np, stride=(1, 1, 1), padding=(0, 0, 0))
+ b_np = tvm.topi.testing.conv3d_ncdhw_python(padded_a_np, w_np, stride=(1, 1, 1), padding=(0, 0, 0))
return b_np
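For intuition, the reference path above computes the transposed convolution by dilating the input with the stride, padding, flipping/transposing the kernel, and then running an ordinary convolution. A 1-D numpy analogue of the same identity (illustrative only):

```python
# Transposed conv (stride=2, no padding) as dilate + pad + flipped-kernel conv.
import numpy as np

def conv1d_valid(x, w):
    n, k = len(x), len(w)
    return np.array([np.dot(x[i:i + k], w) for i in range(n - k + 1)])

x = np.array([1., 2., 3.])
w = np.array([0.5, -1., 2.])
d = np.zeros(2 * len(x) - 1); d[::2] = x          # dilate stage
p = np.pad(d, (len(w) - 1, len(w) - 1))           # padding stage
y = conv1d_valid(p, w[::-1])                      # len(y) == (3-1)*2 + 3 == 7
```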
"""Deformable convolution in python"""
import itertools
import numpy as np
-from topi.nn.util import get_pad_tuple
+from tvm.topi.nn.util import get_pad_tuple
def deformable_conv2d_nchw_python(a_np, offset_np, w_np, stride, padding, dilation,
deformable_groups, groups):
"""Upsampling in python"""
import math
import numpy as np
-from topi.util import nchw_pack_layout
+from tvm.topi.util import nchw_pack_layout
def upsample_nearest(arr, scale):
from __future__ import absolute_import as _abs
import tvm
from tvm import te
-import topi
+from tvm import topi
from . import cpp
from . import tag
from .util import within_index, make_idx
This operation can always be composed of unsqueezing and
expanding dims on those unsqueezed axes.
- Examples::
- input = [ 12. 19. 27.]
- input.shape = (3,)
+ Examples
+ --------
+ .. code-block::
- new_shape_array = [[[1,2],[2,3],[1,3]],
- [[1,4],[4,3],[5,2]],
- [[7,1],[7,2],[7,3]]]
- new_shape_array.shape = (3, 3, 2)
+ input = [ 12. 19. 27.]
+ input.shape = (3,)
- expand_like(input, [1,2], new_shape_array) =
- [[[12,12],[12,12],[12,12]],
- [[19,19],[19,19],[19,19]],
- [[27,27],[27,27],[27,27]]]
+ new_shape_array = [[[1,2],[2,3],[1,3]],
+ [[1,4],[4,3],[5,2]],
+ [[7,1],[7,2],[7,3]]]
+ new_shape_array.shape = (3, 3, 2)
+
+ expand_like(input, [1,2], new_shape_array) =
+ [[[12,12],[12,12],[12,12]],
+ [[19,19],[19,19],[19,19]],
+ [[27,27],[27,27],[27,27]]]
Parameters
----------
The tensor with the target shape.
axis: list of int
axis to be expanded on
+
Returns
-------
ret : tvm.te.Tensor
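The docstring example above, replayed in numpy (illustrative, not the tvm implementation):

```python
# expand_like == unsqueeze on `axis`, then broadcast to the target shape.
import numpy as np

inp = np.array([12., 19., 27.])                  # input.shape == (3,)
out = np.broadcast_to(inp[:, None, None], (3, 3, 2))
# out[0] == [[12, 12], [12, 12], [12, 12]], matching the example
```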
def matmul(a, b, transp_a=False, transp_b=False):
"""
Creates an operation that calculates a matrix multiplication (row-major notation):
- A(i, k) * B(k, j)
+ A(i, k) * B(k, j)
if transp_a == transp_b, the usual transposed combinations, otherwise
Parameters
def within_index(b, e, s, i):
"""Return a boolean value that indicates if i is within the given index.
- Parameter
- ---------
+ Parameters
+ ----------
b : Expr
beginning of the index
The returned value is only meaningful if within_index() returns True
for the same set of parameters.
- Parameter
- ---------
+ Parameters
+ ----------
b : Expr
beginning of the index
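For intuition, a pure-python analogue of `within_index(b, e, s, i)` (a sketch assuming a positive stride; the TIR version also covers negative strides):

```python
# i is "within" the slice b:e:s iff the slice visits index i.
def within_index_py(b, e, s, i):
    return b <= i < e and (i - b) % s == 0
```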
from tvm.te import hybrid
from tvm.tir import exp, sqrt
-import topi
+from tvm import topi
from ..nms import non_max_suppression
import tvm
from tvm import te
from tvm import autotvm
-from topi.util import get_const_int, get_const_tuple
+from tvm.topi.util import get_const_int, get_const_tuple
from .. import tag
from ..nn.bitserial_util import bitpack, binary_op_multiplier
let output = std::process::Command::new(mf_dir!("/tests/build_model.py"))
.env(
"PYTHONPATH",
- concat!(
- mf_dir!("/../../python"),
- ":",
- mf_dir!("/../../nnvm/python"),
- ":",
- mf_dir!("/../../topi/python")
- ),
+ concat!(mf_dir!("/../../python"), ":", mf_dir!("/../../nnvm/python")),
)
.output()
.expect("Failed to build test model");
*/
#include "compile_engine.h"
-#include <topi/tags.h>
#include <tvm/driver/driver_api.h>
#include <tvm/ir/type_functor.h>
#include <tvm/relay/analysis.h>
#include <tvm/te/operation.h>
#include <tvm/te/schedule.h>
#include <tvm/te/schedule_pass.h>
+#include <tvm/topi/tags.h>
#include <functional>
#include <limits>
* \brief Registration of annotation operators.
*/
-#include <topi/elemwise.h>
#include <tvm/relay/attrs/annotation.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/op.h>
#include <tvm/relay/op_attr_types.h>
#include <tvm/tir/expr.h>
+#include <tvm/topi/elemwise.h>
#include "../../transforms/infer_layout_util.h"
#include "../type_relations.h"
* \brief Property def of nn operators.
*/
-#include <topi/elemwise.h>
#include <tvm/relay/attrs/debug.h>
#include <tvm/relay/op.h>
#include <tvm/tir/data_layout.h>
+#include <tvm/topi/elemwise.h>
#include <vector>
*/
#include "transform.h"
-#include <topi/broadcast.h>
-#include <topi/transform.h>
#include <tvm/relay/attrs/transform.h>
#include <tvm/relay/op.h>
#include <tvm/relay/op_attr_types.h>
#include <tvm/runtime/registry.h>
+#include <tvm/topi/broadcast.h>
+#include <tvm/topi/transform.h>
#include <utility>
#include <vector>
RELAY_REGISTER_OP("dyn.reshape")
.describe(R"code(Reshapes the input array based on the values in the newshape array.
-
+
To give users more convenience without manual shape inference,
some dimensions of the shape can take special values from the set {0, -1, -3}.
The significance of each is explained below:
data.shape = (2,3,4,5), newshape = (-3,-3), result.shape = (6,20)
data.shape = (2,3,4), newshape = (0,-3), result.shape = (2,12)
- Special values -2 and -4 from the standard reshape op would introduce dynamic rank
+ Special values -2 and -4 from the standard reshape op would introduce dynamic rank
in this op. Thus, they are not permitted.
)code" TVM_ADD_FILELINE)
* \brief Operators for manifest shape-aware memory allocation in Relay.
*/
-#include <topi/elemwise.h>
#include <tvm/relay/attrs/memory.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/op.h>
#include <tvm/relay/op_attr_types.h>
#include <tvm/runtime/data_type.h>
+#include <tvm/topi/elemwise.h>
#include "../../transforms/infer_layout_util.h"
#include "../op_common.h"
* \file correlation.cc
* \brief Correlation operators
*/
-#include <topi/nn.h>
#include <tvm/relay/attrs/nn.h>
#include <tvm/relay/op.h>
#include <tvm/tir/data_layout.h>
#include <tvm/tir/op.h>
+#include <tvm/topi/nn.h>
#include <vector>
#include "nn.h"
-#include <topi/nn.h>
-#include <topi/nn/bias_add.h>
-#include <topi/nn/flatten.h>
-#include <topi/nn/softmax.h>
#include <tvm/relay/attrs/image.h>
#include <tvm/relay/attrs/nn.h>
#include <tvm/relay/op.h>
#include <tvm/tir/data_layout.h>
+#include <tvm/topi/nn.h>
+#include <tvm/topi/nn/bias_add.h>
+#include <tvm/topi/nn/flatten.h>
+#include <tvm/topi/nn/softmax.h>
#include <string>
#include <vector>
* \file pad.cc
* \brief Implementation of operator pad
*/
-#include <topi/nn.h>
#include <tvm/relay/attrs/nn.h>
#include <tvm/relay/op.h>
#include <tvm/tir/data_layout.h>
#include <tvm/tir/op.h>
+#include <tvm/topi/nn.h>
#include <vector>
*/
#include "pooling.h"
-#include <topi/nn/pooling.h>
#include <tvm/relay/attrs/nn.h>
#include <tvm/relay/op.h>
#include <tvm/relay/op_attr_types.h>
#include <tvm/tir/data_layout.h>
+#include <tvm/topi/nn/pooling.h>
#include <vector>
* \file binary.cc
* \brief binary broadcast operators.
*/
-#include <topi/broadcast.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/op.h>
+#include <tvm/topi/broadcast.h>
#include "../op_common.h"
#include "../type_relations.h"
* \file reduce.cc
* \brief Reduction operators.
*/
-#include <topi/elemwise.h>
-#include <topi/reduction.h>
#include <tvm/relay/attrs/reduce.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/op.h>
+#include <tvm/topi/elemwise.h>
+#include <tvm/topi/reduction.h>
#include <limits>
#include <numeric>
*/
#include "transform.h"
-#include <topi/broadcast.h>
-#include <topi/elemwise.h>
-#include <topi/nn.h>
-#include <topi/reduction.h>
-#include <topi/transform.h>
#include <tvm/ir/error.h>
#include <tvm/relay/attrs/transform.h>
#include <tvm/relay/op.h>
#include <tvm/tir/data_layout.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/op.h>
+#include <tvm/topi/broadcast.h>
+#include <tvm/topi/elemwise.h>
+#include <tvm/topi/nn.h>
+#include <tvm/topi/reduction.h>
+#include <tvm/topi/transform.h>
#include <vector>
* \file unary.cc
* \brief Unary operators.
*/
-#include <topi/elemwise.h>
-#include <topi/transform.h>
#include <tvm/relay/attrs/transform.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/op.h>
+#include <tvm/topi/elemwise.h>
+#include <tvm/topi/transform.h>
#include "../make_op.h"
#include "../op_common.h"
* \file yolo.cc
* \brief Yolo related operators
*/
-#include <topi/vision/reorg.h>
#include <tvm/relay/attrs/vision.h>
#include <tvm/relay/op.h>
+#include <tvm/topi/vision/reorg.h>
#include <vector>
* \brief Dialect operators for Relay VM.
*/
-#include <topi/elemwise.h>
#include <tvm/relay/attrs/memory.h>
#include <tvm/relay/attrs/vm.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/op.h>
#include <tvm/relay/op_attr_types.h>
#include <tvm/runtime/data_type.h>
+#include <tvm/topi/elemwise.h>
#include "../../transforms/infer_layout_util.h"
#include "../op_common.h"
* (3) and sum them together to get the adjoint of the input itself.
* The three steps are computed recursively.
*/
-#include <topi/elemwise.h>
-#include <topi/transform.h>
#include <tvm/runtime/registry.h>
#include <tvm/te/autodiff.h>
#include <tvm/tir/stmt_functor.h>
+#include <tvm/topi/elemwise.h>
+#include <tvm/topi/transform.h>
#include <memory>
#include <vector>
* \brief Registration of broadcast operators
* \file broadcast.cc
*/
-#include <topi/broadcast.h>
-#include <topi/util.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/runtime/registry.h>
+#include <tvm/topi/broadcast.h>
+#include <tvm/topi/util.h>
+namespace tvm {
namespace topi {
using namespace tvm;
});
} // namespace topi
+} // namespace tvm
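Note the PackedFunc registration names are unchanged by the namespace wrap; only the enclosing C++ namespace moves, so python-side lookups keep working. A quick check, assuming the `topi.add` global already registered in this file:

```python
import tvm
fadd = tvm.get_global_func("topi.add")   # still found under the old name
```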
* \brief Registration of elemwise operators
* \file elemwise.cc
*/
-#include <topi/elemwise.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/runtime/registry.h>
+#include <tvm/topi/elemwise.h>
+namespace tvm {
namespace topi {
using namespace tvm;
});
} // namespace topi
+} // namespace tvm
* \brief Registration of NN operators
* \file nn.cc
*/
-#include <topi/nn.h>
-#include <topi/nn/batch_matmul.h>
-#include <topi/nn/bias_add.h>
-#include <topi/nn/bnn.h>
-#include <topi/nn/dense.h>
-#include <topi/nn/dilate.h>
-#include <topi/nn/flatten.h>
-#include <topi/nn/local_response_norm.h>
-#include <topi/nn/mapping.h>
-#include <topi/nn/pooling.h>
-#include <topi/nn/softmax.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/runtime/registry.h>
-
+#include <tvm/topi/nn.h>
+#include <tvm/topi/nn/batch_matmul.h>
+#include <tvm/topi/nn/bias_add.h>
+#include <tvm/topi/nn/bnn.h>
+#include <tvm/topi/nn/dense.h>
+#include <tvm/topi/nn/dilate.h>
+#include <tvm/topi/nn/flatten.h>
+#include <tvm/topi/nn/local_response_norm.h>
+#include <tvm/topi/nn/mapping.h>
+#include <tvm/topi/nn/pooling.h>
+#include <tvm/topi/nn/softmax.h>
+
+namespace tvm {
namespace topi {
using namespace tvm;
});
} // namespace topi
+} // namespace tvm
* \brief Registration of reduction operators
* \file reduction.cc
*/
-#include <topi/reduction.h>
-#include <topi/util.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/runtime/registry.h>
+#include <tvm/topi/reduction.h>
+#include <tvm/topi/util.h>
+namespace tvm {
namespace topi {
using namespace tvm;
});
} // namespace topi
+} // namespace tvm
*/
#define TOPI_REDUCE_ATLEAST1D 0
-#include <topi/cuda/dense.h>
-#include <topi/cuda/injective.h>
-#include <topi/cuda/normalization.h>
-#include <topi/cuda/pooling.h>
-#include <topi/cuda/reduction.h>
-#include <topi/cuda/softmax.h>
-#include <topi/detail/tensor_utils.h>
-#include <topi/generic/default.h>
-#include <topi/generic/extern.h>
-#include <topi/generic/injective.h>
-#include <topi/rocm/dense.h>
-#include <topi/rocm/injective.h>
-#include <topi/rocm/normalization.h>
-#include <topi/rocm/pooling.h>
-#include <topi/rocm/reduction.h>
-#include <topi/rocm/softmax.h>
-#include <topi/x86/bnn.h>
-#include <topi/x86/default.h>
-#include <topi/x86/injective.h>
#include <tvm/ir/expr.h>
#include <tvm/runtime/module.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/runtime/registry.h>
#include <tvm/target/generic_func.h>
-
+#include <tvm/topi/cuda/dense.h>
+#include <tvm/topi/cuda/injective.h>
+#include <tvm/topi/cuda/normalization.h>
+#include <tvm/topi/cuda/pooling.h>
+#include <tvm/topi/cuda/reduction.h>
+#include <tvm/topi/cuda/softmax.h>
+#include <tvm/topi/detail/tensor_utils.h>
+#include <tvm/topi/generic/default.h>
+#include <tvm/topi/generic/extern.h>
+#include <tvm/topi/generic/injective.h>
+#include <tvm/topi/rocm/dense.h>
+#include <tvm/topi/rocm/injective.h>
+#include <tvm/topi/rocm/normalization.h>
+#include <tvm/topi/rocm/pooling.h>
+#include <tvm/topi/rocm/reduction.h>
+#include <tvm/topi/rocm/softmax.h>
+#include <tvm/topi/x86/bnn.h>
+#include <tvm/topi/x86/default.h>
+#include <tvm/topi/x86/injective.h>
+
+namespace tvm {
namespace topi {
using namespace tvm;
.register_func({"rocm"}, WrapDenseOp(topi::rocm::dense_rocm));
} // namespace topi
+} // namespace tvm
* \brief Registration of transform operators
* \file transform.cc
*/
-#include <topi/transform.h>
-#include <topi/util.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/runtime/registry.h>
+#include <tvm/topi/transform.h>
+#include <tvm/topi/util.h>
+namespace tvm {
namespace topi {
using namespace tvm;
});
} // namespace topi
+} // namespace tvm
* \brief Registration of vision operators
* \file vision.cc
*/
-#include <topi/vision/reorg.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/runtime/registry.h>
+#include <tvm/topi/vision/reorg.h>
+namespace tvm {
namespace topi {
using namespace tvm;
});
} // namespace topi
+} // namespace tvm
#include <dmlc/logging.h>
#include <gtest/gtest.h>
-#include <topi/nn.h>
#include <tvm/auto_scheduler/compute_dag.h>
#include <tvm/runtime/container.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/nn.h>
#include <unordered_set>
#include <dmlc/logging.h>
#include <gtest/gtest.h>
-#include <topi/cuda/injective.h>
#include <tvm/driver/driver_api.h>
#include <tvm/runtime/registry.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/cuda/injective.h>
#include <cmath>
#include <string>
*/
#include <gtest/gtest.h>
-#include <topi/broadcast.h>
-#include <topi/generic/injective.h>
#include <tvm/driver/driver_api.h>
#include <tvm/ir/module.h>
#include <tvm/relay/analysis.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/runtime/registry.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/broadcast.h>
+#include <tvm/topi/generic/injective.h>
using namespace tvm;
using namespace tvm::relay;
*/
#include <gtest/gtest.h>
-#include <topi/broadcast.h>
-#include <topi/generic/injective.h>
#include <tvm/driver/driver_api.h>
#include <tvm/ir/module.h>
#include <tvm/node/structural_equal.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/runtime/registry.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/broadcast.h>
+#include <tvm/topi/generic/injective.h>
using namespace tvm;
*/
#include <gtest/gtest.h>
-#include <topi/elemwise.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/elemwise.h>
+namespace tvm {
namespace topi {
TEST(Tensor, Basic) {
using namespace tvm;
auto C = topi::exp(A);
}
} // namespace topi
+} // namespace tvm
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
#include <gtest/gtest.h>
#include <spawn.h>
#include <sys/wait.h>
-#include <topi/generic/injective.h>
#include <tvm/driver/driver_api.h>
#include <tvm/relay/analysis.h>
#include <tvm/relay/expr.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/runtime/registry.h>
#include <tvm/te/operation.h>
+#include <tvm/topi/generic/injective.h>
TVM_REGISTER_GLOBAL("test.sch").set_body([](tvm::TVMArgs args, tvm::TVMRetValue* rv) {
*rv = topi::generic::schedule_injective(args[0], args[1]);
import tvm
from tvm import te
import numpy as np
-import topi.testing
+import tvm.topi.testing
from tvm.contrib import cblas
def verify_matmul_add(m, l, n, transa=False, transb=False, dtype="float32"):
a = a.transpose(0, 2, 1)
if not transb:
b = b.transpose(0, 2, 1)
- return topi.testing.batch_matmul(a, b)
+ return tvm.topi.testing.batch_matmul(a, b)
def verify(target="llvm"):
if not tvm.runtime.enabled(target):
from tvm.contrib import cudnn
from tvm.contrib.nvcc import have_fp16
import numpy as np
-import topi.testing
+import tvm.topi.testing
def verify_conv2d(data_dtype, conv_dtype, tensor_format=0, groups=1):
in_channel = 4
w = tvm.nd.array(w_np, ctx)
y = tvm.nd.array(y_np, ctx)
if tensor_format == 0:
- c_np = topi.testing.conv2d_nchw_python(x_np, w_np, 1, 1, groups=groups)
+ c_np = tvm.topi.testing.conv2d_nchw_python(x_np, w_np, 1, 1, groups=groups)
elif tensor_format == 1:
wt = w_np.transpose((1, 2, 3, 0)) #OHWI => HWIO
- c_np = topi.testing.conv2d_nhwc_python(x_np, wt, 1, 1, groups=groups)
+ c_np = tvm.topi.testing.conv2d_nhwc_python(x_np, wt, 1, 1, groups=groups)
f(x, w, y)
tvm.testing.assert_allclose(y.asnumpy(), c_np, atol=1e-2, rtol=1e-2)
w = tvm.nd.array(w_np, ctx)
y = tvm.nd.array(y_np, ctx)
if tensor_format == 0:
- c_np = topi.testing.conv3d_ncdhw_python(x_np, w_np, 1, 1, groups)
+ c_np = tvm.topi.testing.conv3d_ncdhw_python(x_np, w_np, 1, 1, groups)
else:
raise AssertionError("For now, conv3d tensor format only support: 0(NCHW)")
ctx = tvm.gpu(0)
a_np = np.random.uniform(size=shape).astype(dtype)
- b_np = topi.testing.softmax_python(a_np)
+ b_np = tvm.topi.testing.softmax_python(a_np)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
f = tvm.build(s, [A, B], "cuda", target_host="llvm", name="softmax")
ctx = tvm.gpu(0)
n, c, h, w = shape
a_np = np.random.uniform(size=shape).astype(dtype)
- b_np = topi.testing.softmax_python(a_np.transpose(0, 2, 3, 1).reshape(h*w, c))
+ b_np = tvm.topi.testing.softmax_python(a_np.transpose(0, 2, 3, 1).reshape(h*w, c))
b_np = b_np.reshape(n, h, w, c).transpose(0, 3, 1, 2)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
import tvm
from tvm import te
import numpy as np
-from topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int16
+from tvm.topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int16
def benchmark_fc_int8_acc16():
import tvm
from tvm import te
import numpy as np
-from topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32_cascadelake
-from topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32
+from tvm.topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32_cascadelake
+from tvm.topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32
import pytest
data_type=1)
yshape = [x.value for x in Y.shape]
- import topi
+ from tvm import topi
s = te.create_schedule(Y.op)
def verify():
Users can directly run this script to verify correctness.
"""
import mxnet as mx
- import topi
+ from tvm import topi
import tvm
from tvm import te
import numpy as np
from tvm import te
import numpy as np
import scipy.signal
-from topi.nn.util import get_pad_tuple
+from tvm.topi.nn.util import get_pad_tuple
from tvm.contrib import nnpack
import pytest
from tvm import te
import numpy as np
import re
-import topi
+from tvm import topi
def findany(pattern, str):
import tvm
from tvm import te
from tvm.contrib import graph_runtime
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm import relay
from tvm.relay.testing.config import ctx_list
-from topi.testing import conv2d_nchw_python
+from tvm.topi.testing import conv2d_nchw_python
import coremltools as cm
import model_zoo
a_np = np.full(input_dim, 1, dtype=dtype)
if mode == 'NN':
- b_np = topi.testing.upsampling_python(a_np, (scale, scale))
+ b_np = tvm.topi.testing.upsampling_python(a_np, (scale, scale))
else:
new_h = input_dim[2] * scale
new_w = input_dim[3] * scale
- b_np = topi.testing.bilinear_resize_python(a_np, (new_h, new_w), 'NCHW')
+ b_np = tvm.topi.testing.bilinear_resize_python(a_np, (new_h, new_w), 'NCHW')
input = [('input', datatypes.Array(*input_dim))]
output = [('output', datatypes.Array(*b_np.shape))]
dtype = "float32"
a_np = np.random.uniform(size=input_dim).astype(dtype)
- b_np = topi.testing.l2_normalize_python(a_np, eps, 1)
+ b_np = tvm.topi.testing.l2_normalize_python(a_np, eps, 1)
input = [('input', datatypes.Array(*input_dim))]
output = [('output', datatypes.Array(*b_np.shape))]
dtype = "float32"
axis=1
a_np = np.random.uniform(size=input_dim).astype(dtype)
- b_np = topi.testing.lrn_python(a_np, size, axis, bias, alpha, beta)
+ b_np = tvm.topi.testing.lrn_python(a_np, size, axis, bias, alpha, beta)
input = [('input', datatypes.Array(*input_dim))]
output = [('output', datatypes.Array(*b_np.shape))]
from onnx import helper, TensorProto, mapping
import torch
import torchvision
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
import tvm
from tvm import te
from tvm import relay
def verify_gather_nd(in_shape, indices, dtype):
x = np.random.uniform(size=in_shape).astype(dtype)
indices = np.array(indices, dtype="int32")
- out_np = topi.testing.gather_nd_python(x, indices)
+ out_np = tvm.topi.testing.gather_nd_python(x, indices)
y = helper.make_node("GatherND", ['in', 'indices'], ['out'])
'out'], mode='nearest', scales=[1.0, 1.0, 2.0, 2.0])
in_array = np.random.uniform(size=in_shape).astype(np.float32)
- out_array = topi.testing.upsampling_python(
+ out_array = tvm.topi.testing.upsampling_python(
in_array, (scale, scale), "NCHW")
graph = helper.make_graph([y],
'out'], mode='nearest', scales=[1.0, 1.0, 2.0, 2.0, 2.0])
in_array = np.random.uniform(size=in_shape).astype(np.float32)
- out_array = topi.testing.upsampling3d_python(
+ out_array = tvm.topi.testing.upsampling3d_python(
in_array, (scale, scale, scale), "NCDHW")
graph = helper.make_graph([y],
'out'], mode='linear', scales=[1.0, 1.0, 2.0, 2.0])
in_array = np.random.uniform(size=in_shape).astype(np.float32)
- out_array = topi.testing.bilinear_resize_python(
+ out_array = tvm.topi.testing.bilinear_resize_python(
in_array, (3*scale, 3*scale), "NCHW")
graph = helper.make_graph([y],
y = helper.make_node("Upsample", ['in', 'scales'], ['out'], mode='linear')
scales = [1, 1, 2, 2]
in_array = np.random.uniform(size=in_shape).astype(np.float32)
- out_array = topi.testing.bilinear_resize_python(
+ out_array = tvm.topi.testing.bilinear_resize_python(
in_array, (3*scale, 3*scale), "NCHW")
ref_node = helper.make_node('Constant',
y = helper.make_node("Upsample", ['in', 'scales'], ['out'], mode='linear')
scales = [1.0, 1.0, 2.0, 2.0, 2.0]
in_array = np.random.uniform(size=in_shape).astype(np.float32)
- out_array = topi.testing.trilinear_resize3d_python(
+ out_array = tvm.topi.testing.trilinear_resize3d_python(
in_array, (3*scale, 3*scale, 3*scale), "NCDHW", coordinate_transformation_mode="half_pixel")
ref_array = np.array(scales)
opname = 'Softmax'
indata = np.random.uniform(size=inshape).astype(np.float32)
outshape = inshape
- outdata = topi.testing.softmax_python(indata)
+ outdata = tvm.topi.testing.softmax_python(indata)
if isinstance(axis, int):
y = helper.make_node(opname, ['in'], ['out'], axis=axis)
elif axis is None:
def test_LogSoftmax():
_test_onnx_op_elementwise((1, 4),
- topi.testing.log_softmax_python,
+ tvm.topi.testing.log_softmax_python,
{},
'float32',
'LogSoftmax',
from tvm.autotvm.task.space import FallbackConfigEntity
from tvm.contrib import nnpack
from tvm.contrib.pickle_memoize import memoize
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
from pytest import skip
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
- dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
- c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
+ dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+ c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
from tvm import relay
from tvm.relay.loops import while_loop
from tvm.relay.testing import run_infer_type as infer_type
-import topi.testing
+import tvm.topi.testing
def int32(val):
return relay.const(val, 'int32')
np_end = np.random.randint(5, 10, size=end_shape, dtype="int32")
np_strides = np.random.randint(1, 2 if slice_mode == "size" else 3, size=strides_shape, dtype="int32")
# target numpy result
- ref_res = topi.testing.strided_slice_python(np_data, np_begin, np_end, np_strides, slice_mode)
+ ref_res = tvm.topi.testing.strided_slice_python(np_data, np_begin, np_end, np_strides, slice_mode)
# Relay Module
mod = tvm.IRModule()
mod["main"] = relay.Function([data, boxes, box_indices], y)
data_np = np.random.uniform(size=data_shape).astype(dtype)
boxes_np = np.random.uniform(size=static_boxes).astype(dtype)
- box_indices_np = np.random.uniform(size=static_box_indices_shape).astype(indices_dtype)
+ box_indices_np = np.random.uniform(size=static_box_indices_shape).astype(indices_dtype)
for kind in ["debug", "vm"]:
ex = relay.create_executor(kind, mod=mod, ctx=tvm.cpu(), target="llvm")
result = ex.evaluate()(data_np, boxes_np, box_indices_np)
def test_any_crop_and_resize():
verify_any_crop_and_resize(
- data_shape=(1, 234, 234, 256),
- boxes_shape=(relay.Any(), 4),
+ data_shape=(1, 234, 234, 256),
+ boxes_shape=(relay.Any(), 4),
box_indices_shape=(relay.Any(),),
crop_size=(14, 14),
layout='NHWC',
static_box_indices_shape=(128,),
ref_out_shape=(128, 14, 14, 256))
verify_any_crop_and_resize(
- data_shape=(1, 256, 234, 234),
- boxes_shape=(relay.Any(), 4),
+ data_shape=(1, 256, 234, 234),
+ boxes_shape=(relay.Any(), 4),
box_indices_shape=(relay.Any(),),
crop_size=(14, 14),
layout='NCHW',
import tvm.testing
from tvm import relay
from tvm import autotvm
-import topi
+from tvm import topi
from tvm.relay.testing import run_infer_type
from tvm.relay.testing.temp_op_attr import TempOpAttr
from scipy import special
import tvm
import tvm.relay as relay
-import topi
+from tvm import topi
from tvm import te
from tvm.contrib import graph_runtime
# under the License.
import numpy as np
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
import tvm
from tvm import te
from tvm import relay
ph, pw = padding
y_shape = topi.util.get_const_tuple(fwd_func.ret_type.shape)
out_grad = np.ones(shape=y_shape)
- ref_grad = topi.testing.pool_grad_nchw(data, out_grad, pool_size=pool_size, strides=strides,
- padding=[ph, pw, ph, pw],
- pool_type='max', ceil_mode=ceil_mode)
+ ref_grad = tvm.topi.testing.pool_grad_nchw(
+ data, out_grad, pool_size=pool_size, strides=strides,
+ padding=[ph, pw, ph, pw],
+ pool_type='max', ceil_mode=ceil_mode)
for target, ctx in ctx_list():
intrp = relay.create_executor(ctx=ctx, target=target)
ph, pw = padding
y_shape = topi.util.get_const_tuple(fwd_func.ret_type.shape)
out_grad = np.ones(shape=y_shape)
- ref_grad = topi.testing.pool_grad_nchw(data, out_grad, pool_size=pool_size, strides=strides,
- padding=[ph, pw, ph, pw],
- pool_type='avg', ceil_mode=ceil_mode)
+ ref_grad = tvm.topi.testing.pool_grad_nchw(
+ data, out_grad, pool_size=pool_size, strides=strides,
+ padding=[ph, pw, ph, pw],
+ pool_type='avg', ceil_mode=ceil_mode)
for target, ctx in ctx_list():
intrp = relay.create_executor(ctx=ctx, target=target)
data = np.random.rand(*x_shape).astype("float32")
y_shape = topi.util.get_const_tuple(fwd_func.ret_type.shape)
out_grad = np.ones(shape=y_shape)
- ref_grad = topi.testing.pool_grad_nchw(data, out_grad, pool_size=(x_shape[2], x_shape[3]),
- strides=(1, 1), padding=[0, 0, 0, 0], pool_type='avg',
- ceil_mode=False)
+ ref_grad = tvm.topi.testing.pool_grad_nchw(
+ data, out_grad, pool_size=(x_shape[2], x_shape[3]),
+ strides=(1, 1), padding=[0, 0, 0, 0], pool_type='avg',
+ ceil_mode=False)
for target, ctx in ctx_list():
intrp = relay.create_executor(ctx=ctx, target=target)
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import ctx_list, run_infer_type
-import topi.testing
+import tvm.topi.testing
from tvm.contrib.nvcc import have_fp16
assert yy.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], y)
x_data = np.random.uniform(size=shape).astype(dtype)
- ref_res = topi.testing.softmax_python(x_data)
+ ref_res = tvm.topi.testing.softmax_python(x_data)
for target, ctx in ctx_list():
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
assert yy.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], y)
x_data = np.random.uniform(size=shape).astype(dtype)
- ref_res = topi.testing.log_softmax_python(x_data)
+ ref_res = tvm.topi.testing.log_softmax_python(x_data)
for target, ctx in ctx_list():
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
import numpy as np
import tvm
from tvm import te
-import topi.testing
+import tvm.topi.testing
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import ctx_list, run_infer_type
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
def test_checkpoint():
x = relay.Var("x", relay.ty.TensorType(shape , dtype))
y = relay.Var("y", relay.ty.TensorType(shape_like, dtype))
z = relay.broadcast_to_like(x, y)
-
+
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType(shape_like, dtype)
x = np.random.uniform(size=shape).astype(dtype)
y = np.random.uniform(size=shape_like).astype(dtype)
ref_res = np.broadcast_to(x, shape_like)
-
+
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
func = relay.Function([x, y], z)
x_np = np.random.uniform(size=x_shape).astype(dtype)
y_np = np.random.uniform(size=y_shape).astype(dtype)
- z_np = topi.testing.batch_matmul(x_np, y_np)
+ z_np = tvm.topi.testing.batch_matmul(x_np, y_np)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
func = relay.Function([x], y)
np_data = np.random.uniform(low=0, high=255, size=dshape).astype(dtype)
- np_out = topi.testing.adaptive_pool(np_data, out_size, pool_type, layout)
+ np_out = tvm.topi.testing.adaptive_pool(np_data, out_size, pool_type, layout)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
func = relay.Function([data, valid_length], out)
data_np = np.random.uniform(size=data_shape).astype(dtype)
valid_length_np = np.random.randint(0, max_length, size=nbatch).astype(itype)
- gt_out_np = topi.testing.sequence_mask(data_np, valid_length_np, mask_value, axis)
+ gt_out_np = tvm.topi.testing.sequence_mask(data_np, valid_length_np, mask_value, axis)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
assert checked.checked_type == relay.ty.TensorType(_get_oshape(indices_shape, depth, axis), dtype)
func = relay.Function([indices], out)
indices_np = np.random.randint(0, depth, size=indices_shape).astype("int32")
- out_np = topi.testing.one_hot(indices_np, on_value, off_value, depth, axis, dtype)
+ out_np = tvm.topi.testing.one_hot(indices_np, on_value, off_value, depth, axis, dtype)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
from tvm.relay import transform
from tvm.relay.testing import ctx_list, run_infer_type
from tvm.contrib import util
-import topi.testing
-from topi.cuda.conv3d_winograd import _infer_tile_size
+import tvm.topi.testing
+from tvm.topi.cuda.conv3d_winograd import _infer_tile_size
def test_conv1d_infer_type():
func = relay.Function([x, w], y)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
- ref_res = topi.testing.conv1d_ncw_python(
+ ref_res = tvm.topi.testing.conv1d_ncw_python(
data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, dilation)
for target, ctx in ctx_list():
func = relay.Function([x, w], y)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
- dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)
+ dkernel = tvm.topi.testing.dilate_python(kernel, (1, 1) + dilation)
if fref is None:
- ref_res = topi.testing.conv2d_nchw_python(
+ ref_res = tvm.topi.testing.conv2d_nchw_python(
data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding,
groups=groups)
else:
kshape = (32, 1, 3, 3)
run_test_conv2d("float32", "float32", 1, dshape, kshape,
padding=(1, 1), channels=32, groups=32, kernel_size=(3 ,3),
- fref=lambda x, w: topi.testing.depthwise_conv2d_python_nchw(
+ fref=lambda x, w: tvm.topi.testing.depthwise_conv2d_python_nchw(
x, w, (1, 1), "SAME"))
# depthwise conv2d for arm_cpu
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
- ref_res = topi.testing.conv2d_nchw_python(
+ ref_res = tvm.topi.testing.conv2d_nchw_python(
data.astype(out_dtype), kernel.astype(out_dtype), 1, padding,
groups=groups)
func = relay.Function([x, w], y)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
- dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)
+ dkernel = tvm.topi.testing.dilate_python(kernel, (1, 1) + dilation)
if fref is None:
- ref_res = topi.testing.conv3d_ncdhw_python(
+ ref_res = tvm.topi.testing.conv3d_ncdhw_python(
data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding,
groups=groups)
else:
func = relay.Function([x, w], y)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
- dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)
+ dkernel = tvm.topi.testing.dilate_python(kernel, (1, 1) + dilation)
if fref is None:
- ref_res = topi.testing.conv3d_ndhwc_python(
+ ref_res = tvm.topi.testing.conv3d_ndhwc_python(
data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding)
else:
ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype))
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
- ref_res = topi.testing.conv3d_ncdhw_python(
+ ref_res = tvm.topi.testing.conv3d_ncdhw_python(
data.astype(out_dtype), kernel.astype(out_dtype), 1, padding,
groups=groups)
data = np.random.uniform(size=dshape).astype(dtype)
kernel = np.random.uniform(size=kshape).astype(dtype)
- ref_res = topi.testing.conv3d_transpose_ncdhw_python(data, kernel, 1, 1)
+ ref_res = tvm.topi.testing.conv3d_transpose_ncdhw_python(data, kernel, 1, 1)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
dtype = "float32"
data = np.random.uniform(size=dshape).astype(dtype)
kernel = np.random.uniform(size=kshape).astype(dtype)
- ref_res = topi.testing.conv2d_transpose_nchw_python(
+ ref_res = tvm.topi.testing.conv2d_transpose_nchw_python(
data, kernel, 2, 1, (1, 1))
for target, ctx in ctx_list():
kernel = np.random.uniform(size=kshape_hwoi).astype(dtype)
# use true kshape layout here - HWOI
- ref_res = topi.testing.conv2d_transpose_nhwc_python(data, kernel, 'HWOI',
- 2, 1, output_padding=(1, 1))
+ ref_res = tvm.topi.testing.conv2d_transpose_nhwc_python(
+ data, kernel, 'HWOI', 2, 1, output_padding=(1, 1))
for target, ctx in ctx_list():
dtype = "float32"
data = np.random.uniform(size=dshape).astype(dtype)
kernel = np.random.uniform(size=kshape).astype(dtype)
- ref_res = topi.testing.conv1d_transpose_ncw_python(
+ ref_res = tvm.topi.testing.conv1d_transpose_ncw_python(
data, kernel, 2, 1, output_padding=(1,))
for target, ctx in ctx_list():
y = opfunc(x, pool_size=pool_size, strides=strides, padding=padding)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
- ref_res = topi.testing.pool1d_ncw_python(data, (2,), (2,),
- (0, 0), (1, 3, 16), pool_type, False)
+ ref_res = tvm.topi.testing.pool1d_ncw_python(
+ data, (2,), (2,), (0, 0), (1, 3, 16), pool_type, False)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
assert out_shape == f_out_shape, \
"Output shape mismatch. expected {}, actual {}".format(out_shape, f_out_shape)
data = np.random.uniform(size=dshape).astype(dtype)
- ref_res = topi.testing.pool3d_ncdhw_python(data, pool_size, strides,
- padding, out_shape, pool_type, False)
+ ref_res = tvm.topi.testing.pool3d_ncdhw_python(
+ data, pool_size, strides, padding, out_shape, pool_type, False)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
assert yy.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
- ref_res = topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta)
+ ref_res = tvm.topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
assert yy.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
- ref_res = topi.testing.l2_normalize_python(x_data, eps, axis)
+ ref_res = tvm.topi.testing.l2_normalize_python(x_data, eps, axis)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
if method == "nearest_neighbor":
- ref = topi.testing.upsampling_python(data, (scale_h, scale_w), layout)
+ ref = tvm.topi.testing.upsampling_python(data, (scale_h, scale_w), layout)
else:
- ref = topi.testing.bilinear_resize_python(data, (int(round(h*scale_h)),
- int(round(w*scale_w))), layout)
+ ref = tvm.topi.testing.bilinear_resize_python(
+ data, (int(round(h*scale_h)), int(round(w*scale_w))), layout)
for target, ctx in ctx_list():
executor = relay.create_executor("graph", ctx=ctx, target=target)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
if method == "nearest_neighbor":
- ref = topi.testing.upsampling3d_python(data, (scale_d, scale_h, scale_w), layout)
+ ref = tvm.topi.testing.upsampling3d_python(data, (scale_d, scale_h, scale_w), layout)
else:
- ref = topi.testing.trilinear_resize3d_python(data, (int(round(d*scale_d)),\
- int(round(h*scale_h)),\
- int(round(w*scale_w))), layout)
+ ref = tvm.topi.testing.trilinear_resize3d_python(
+ data, (int(round(d*scale_d)), int(round(h*scale_h)),
+ int(round(w*scale_w))), layout)
for target, ctx in ctx_list():
func = relay.Function([data1, data2], y)
data1_np = np.random.uniform(size=data_shape).astype(dtype)
data2_np = np.random.uniform(size=data_shape).astype(dtype)
- ref_res = topi.testing.correlation_nchw_python(data1_np, data2_np, kernel_size, max_displacement, stride1, stride2, padding, is_multiply)
+ ref_res = tvm.topi.testing.correlation_nchw_python(
+ data1_np, data2_np, kernel_size, max_displacement,
+ stride1, stride2, padding, is_multiply)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import ctx_list, run_infer_type
-import topi.testing
+import tvm.topi.testing
def test_binary_op():
# target numpy result
x_data = np.random.uniform(size=dshape).astype("float32")
- ref_res = topi.testing.strided_slice_python(
+ ref_res = tvm.topi.testing.strided_slice_python(
x_data, begin, end, strides, slice_mode)
if attr_const:
return
x_data = np.random.uniform(size=dshape).astype("float32")
v_data = np.random.uniform(size=vshape).astype("float32")
- ref_res = topi.testing.strided_set_python(
+ ref_res = tvm.topi.testing.strided_set_python(
x_data, v_data, begin, end, strides)
for target, ctx in ctx_list():
intrp = relay.create_executor("graph", ctx=ctx, target=target)
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import ctx_list, run_infer_type
-import topi.testing
+import tvm.topi.testing
def test_resize_infer_type():
x_data = np.random.uniform(size=dshape).astype("float32")
if method == "bilinear":
- ref_res = topi.testing.bilinear_resize_python(x_data, size, layout)
+ ref_res = tvm.topi.testing.bilinear_resize_python(x_data, size, layout)
else:
- ref_res = topi.testing.upsampling_python(x_data, (scale, scale), layout)
+ ref_res = tvm.topi.testing.upsampling_python(x_data, (scale, scale), layout)
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.image.resize(x, size, layout, method, "align_corners")
assert "size=" in z.astext()
x_data = np.random.uniform(size=dshape).astype("float32")
if method == "trilinear":
- ref_res = topi.testing.trilinear_resize3d_python(x_data, size, layout)
+ ref_res = tvm.topi.testing.trilinear_resize3d_python(x_data, size, layout)
else:
- ref_res = topi.testing.upsampling3d_python(x_data, (scale, scale, scale), layout)
+ ref_res = tvm.topi.testing.upsampling3d_python(x_data, (scale, scale, scale), layout)
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.image.resize3d(x, size, layout, method, "align_corners")
assert "size=" in z.astext()
image_data = np.random.uniform(size=img_shape).astype("float32")
- ref_res = topi.testing.crop_and_resize_python(image_data,
+ ref_res = tvm.topi.testing.crop_and_resize_python(image_data,
boxes,
box_indices,
crop_size,
np_data = np.random.uniform(size=data_shape).astype("float32")
np_rois = np.random.uniform(size=rois_shape).astype('float32') * in_size
np_rois[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi)
- ref_res = topi.testing.roi_align_nchw_python(np_data, np_rois, pooled_size=pooled_size,
- spatial_scale=spatial_scale,
- sample_ratio=sample_ratio)
+ ref_res = tvm.topi.testing.roi_align_nchw_python(
+ np_data, np_rois, pooled_size=pooled_size,
+ spatial_scale=spatial_scale, sample_ratio=sample_ratio)
for target, ctx in ctx_list():
np_data = np.random.uniform(size=data_shape).astype("float32")
np_rois = np.random.uniform(size=rois_shape).astype('float32') * in_size
np_rois[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi).astype('float32')
- ref_res = topi.testing.roi_pool_nchw_python(np_data, np_rois, pooled_size=pooled_size,
- spatial_scale=spatial_scale)
+ ref_res = tvm.topi.testing.roi_pool_nchw_python(
+ np_data, np_rois, pooled_size=pooled_size, spatial_scale=spatial_scale)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
def test_yolo_reorg():
def verify_yolo_reorg(shape, stride):
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
- ref_res = topi.testing.reorg_python(x_data, stride)
+ ref_res = tvm.topi.testing.reorg_python(x_data, stride)
x = relay.var("x", relay.TensorType(shape, "float32"))
z = relay.vision.yolo_reorg(x, stride=stride)
data = np.random.uniform(size=data_shape).astype(dtype)
offset = np.random.uniform(size=offset_shape).astype(dtype)
kernel = np.random.uniform(size=kernel_shape).astype(dtype)
- ref_res = topi.testing.deformable_conv2d_nchw_python(data, offset, kernel, stride=(1, 1), padding=(1, 1), dilation=(1, 1), deformable_groups=deformable_groups, groups=groups)
+ ref_res = tvm.topi.testing.deformable_conv2d_nchw_python(
+ data, offset, kernel, stride=(1, 1), padding=(1, 1), dilation=(1, 1),
+ deformable_groups=deformable_groups, groups=groups)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
x_data = np.random.uniform(size=dshape).astype("float32")
if layout == "NHWC":
x_data = np.transpose(x_data, axes=[0, 3, 1, 2])
- ref_res = topi.testing.depth_to_space_python(x_data, block_size, mode=mode)
+ ref_res = tvm.topi.testing.depth_to_space_python(x_data, block_size, mode=mode)
if layout == "NHWC":
x_data = np.transpose(x_data, axes=[0, 2, 3, 1])
ref_res = np.transpose(ref_res, axes=[0, 2, 3, 1])
x_data = np.random.uniform(size=dshape).astype("float32")
if layout == "NHWC":
x_data = np.transpose(x_data, axes=[0, 3, 1, 2])
- ref_res = topi.testing.space_to_depth_python(x_data, block_size)
+ ref_res = tvm.topi.testing.space_to_depth_python(x_data, block_size)
if layout == "NHWC":
x_data = np.transpose(x_data, axes=[0, 2, 3, 1])
ref_res = np.transpose(ref_res, axes=[0, 2, 3, 1])
func = relay.Function([data], y)
data_np = np.random.uniform(size=data_shape).astype(dtype)
- ref_res = topi.testing.affine_grid_python(data_np, target_shape)
+ ref_res = tvm.topi.testing.affine_grid_python(data_np, target_shape)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
data_np = np.random.uniform(size=data_shape).astype(dtype)
grid_np = np.random.uniform(size=grid_shape, low=-1.5, high=1.5).astype(dtype)
- ref_res = topi.testing.grid_sample_nchw_python(data_np, grid_np, method='bilinear')
+ ref_res = tvm.topi.testing.grid_sample_nchw_python(data_np, grid_np, method='bilinear')
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
import numpy as np
from tvm import relay
from tvm.contrib import graph_runtime
-import topi.testing
+import tvm.topi.testing
def test_same_io_qnn_params():
data_dtype = 'int32'
import numpy as np
from tvm import relay
from tvm.contrib import graph_runtime
-import topi.testing
+import tvm.topi.testing
# "unquantize" a quantized tensor
def recover(data, scale, zp):
y = relay.Function(analysis.free_vars(y), y)
return y
- import topi
+ from tvm import topi
def alter_conv2d(attrs, inputs, tinfos, out_type):
with tvm.target.create("llvm"):
return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)
def test_alter_layout_nhwc_arm():
""" Check that AlterOplayout does not alter NHWC data layout. """
def alter_conv2d(attrs, inputs, tinfos, out_type):
- import topi
+ from tvm import topi
with tvm.target.create("llvm -device=arm_cpu"):
return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)
self.memory[key] = cfg
def alter_conv2d(attrs, inputs, tinfos, out_type):
- import topi
+ from tvm import topi
with tvm.target.create("llvm -device=arm_cpu -mtriple=aarch64-linux-gnu"):
with Int8Fallback():
tmp = topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import FallbackConfigEntity
-import topi
+from tvm import topi
def get_all_backend():
"""return all supported target
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
import numpy as np
from tvm.contrib.pickle_memoize import memoize
with tvm.target.create(device):
out = topi.nn.fifo_buffer(data, buffer, axis=axis)
- s = topi.testing.get_injective_schedule(device)([out])
+ s = tvm.topi.testing.get_injective_schedule(device)([out])
buffer_tvm = tvm.nd.array(buffer_np, ctx=ctx)
data_tvm = tvm.nd.array(data_np, ctx=ctx)
return
print(' Running on target: {}'.format(device))
- conv2d_nchw, schedule_conv2d_nchw = topi.testing.get_conv2d_nchw_implement(device)
+ conv2d_nchw, schedule_conv2d_nchw = tvm.topi.testing.get_conv2d_nchw_implement(device)
with tvm.target.create(device):
out = topi.nn.fifo_buffer(inc_input, context, axis=buffer_axis)
- s = topi.testing.get_injective_schedule(device)([out])
+ s = tvm.topi.testing.get_injective_schedule(device)([out])
update_context = tvm.build(s, [inc_input, context, out], device, name='update_context')
out = conv2d_nchw(context, kernel, stride, padding, dilate, dtype)
conv2d_inc = tvm.build(s, [context, kernel, out], device, name='conv2d_inc')
out = topi.nn.fifo_buffer(inc_output, output_window, axis=buffer_axis)
- s = topi.testing.get_injective_schedule(device)([out])
+ s = tvm.topi.testing.get_injective_schedule(device)([out])
update_output_window = tvm.build(s, [inc_output, output_window, out], device,
name='update_output_window')
out = topi.nn.fifo_buffer(inc_input, input_window, axis=buffer_axis)
- s = topi.testing.get_injective_schedule(device)([out])
+ s = tvm.topi.testing.get_injective_schedule(device)([out])
update_input_window = tvm.build(s, [inc_input, input_window, out], device,
name='update_input_window')
# under the License.
import tvm
from tvm import te
-import topi
-from topi import util
+from tvm import topi
+from tvm.topi import util
def test_util():
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
from common import get_all_backend
def get_ref_data():
a_np = np.random.uniform(size=(batch, M, K)).astype(dtype)
b_np = np.random.uniform(size=(batch, N, K)).astype(dtype)
- c_np = topi.testing.batch_matmul(a_np, b_np)
+ c_np = tvm.topi.testing.batch_matmul(a_np, b_np)
return (a_np, b_np, c_np)
# get the test data
a_np, b_np, c_np = get_ref_data()
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- fcompute, fschedule = topi.testing.dispatch(device, _batch_matmul_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _batch_matmul_implement)
out = fcompute(x, y)
s = fschedule([out])
a = tvm.nd.array(a_np, ctx)
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
def generate_quantized_np(shape, bits, out_dtype):
w_ = np.copy(w_np).astype(out_dtype)
for x in np.nditer(w_, op_flags=['readwrite']):
x[...] = 1 if x == 1 else -1
- b_np = topi.testing.conv2d_nchw_python(a_np.astype(out_dtype), w_, stride, padding)
+ b_np = tvm.topi.testing.conv2d_nchw_python(a_np.astype(out_dtype), w_, stride, padding)
else:
- b_np = topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding)
+ b_np = tvm.topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding)
return a_np, w_np, b_np
a_np, w_np, b_np = get_ref_data()
w_ = np.copy(w_np).astype(out_dtype)
for x in np.nditer(w_, op_flags=['readwrite']):
x[...] = 1 if x == 1 else -1
- b_np = topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
+ b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
else:
- b_np = topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(out_dtype)
+ b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(out_dtype)
return a_np, w_np, b_np
a_np, w_np, b_np = get_ref_data()
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
def generate_quantized_np(shape, bits, out_dtype):
np.random.seed(0)
w_ = np.copy(w_np).astype(out_dtype)
for x in np.nditer(w_, op_flags=['readwrite']):
x[...] = 1 if x == 1 else -1
- b_np = topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
+ b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
else:
- b_np = topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(out_dtype)
+ b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(out_dtype)
return a_np, w_np, b_np
a_np, w_np, b_np = get_ref_data()
a = tvm.nd.array(a_np, ctx)
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
_bitserial_dense_implement = {
input_dtype = 'uint8' if "arm_cpu" in target else "uint32"
A = te.placeholder((batch, in_dim), dtype=input_dtype, name='A')
B = te.placeholder((out_dim, in_dim), dtype=input_dtype, name='B')
- fcompute, fschedule = topi.testing.dispatch(target, _bitserial_dense_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(target, _bitserial_dense_implement)
C = fcompute(A, B, activation_bits, weight_bits,
input_dtype, out_dtype, unipolar)
s = fschedule([C])
import numpy as np
import tvm
from tvm import te
-import topi
-from topi.util import get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from common import get_all_backend
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_broadcast_schedule(device)(B)
+ s = tvm.topi.testing.get_broadcast_schedule(device)(B)
foo = tvm.build(s, [A, B], device, name="broadcast_to")
data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = np.broadcast_to(data_npy, out_shape)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_broadcast_schedule(device)(C)
+ s = tvm.topi.testing.get_broadcast_schedule(device)(C)
foo = tvm.build(s, [A, B, C], device, name="broadcast_binary" + "_" + ftopi.__name__)
lhs_npy, lhs_nd = gen_operand(lhs_shape, lhs_min, lhs_max, ctx)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_broadcast_schedule(device)(B)
+ s = tvm.topi.testing.get_broadcast_schedule(device)(B)
foo = tvm.build(s, [A, B], device, name=name)
data_npy = indata.astype(A.dtype)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_broadcast_schedule(device)(B)
+ s = tvm.topi.testing.get_broadcast_schedule(device)(B)
foo = tvm.build(s, [A, B], device, name=name)
data_npy = np.random.uniform(size=shape).astype(A.dtype)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_broadcast_schedule(device)(C)
+ s = tvm.topi.testing.get_broadcast_schedule(device)(C)
foo = tvm.build(s, [A, B, C], device, name=name)
lhs_nd = tvm.nd.array(lhs, ctx)
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
from common import get_all_backend
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(B)
+ s = tvm.topi.testing.get_injective_schedule(device)(B)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), ctx)
import itertools
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
from common import get_all_backend
else:
np_in = a_np
np_w = w_np
- b_np = topi.testing.conv1d_ncw_python(np_in, np_w, stride, padding, dilation)
+ b_np = tvm.topi.testing.conv1d_ncw_python(np_in, np_w, stride, padding, dilation)
if layout == 'NWC':
b_np = np.transpose(b_np, [0, 2, 1])
return a_np, w_np, b_np
print("Skip because %s is not enabled" % device)
return
if layout == "NCW":
- fcompute, fschedule = topi.testing.dispatch(device, _conv1d_ncw_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv1d_ncw_implement)
else:
- fcompute, fschedule = topi.testing.dispatch(device, _conv1d_nwc_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv1d_nwc_implement)
with tvm.target.create(device):
B = fcompute(A, W, stride, padding, dilation, 'float32')
s = fschedule([B])
import itertools
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
from common import get_all_backend
_conv1d_transpose_ncw_implement = {
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
- b_np = topi.testing.conv1d_transpose_ncw_python(a_np, w_np, stride, padding, output_padding)
+ b_np = tvm.topi.testing.conv1d_transpose_ncw_python(a_np, w_np, stride, padding, output_padding)
c_np = np.maximum(b_np, 0)
return a_np, w_np, b_np, c_np
print("Skip because %s is not enabled" % device)
return
with tvm.target.create(device):
- fcompute, fschedule = topi.testing.dispatch(device, _conv1d_transpose_ncw_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv1d_transpose_ncw_implement)
B = fcompute(A, W, stride, padding, A.dtype, output_padding)
C = topi.nn.relu(B)
s1 = fschedule([B])
import tvm
from tvm import te
from tvm import autotvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
-from topi.nn.util import get_pad_tuple
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
from common import get_all_backend
a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype(dtype)
w_np = np.random.uniform(size=(num_filter, in_channel, kernel, kernel)).astype(dtype)
b_np = np.random.uniform(size=(num_filter, 1, 1)).astype(dtype)
- dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
- c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
+ dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+ c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
if add_bias:
c_np += b_np
if add_relu:
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
_conv2d_hwcn_implement = {
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=b_shape).astype(dtype)
- dw_np = topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
- c1_np = topi.testing.conv2d_hwcn_python(a_np, dw_np, stride, padding)
+ dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
+ c1_np = tvm.topi.testing.conv2d_hwcn_python(a_np, dw_np, stride, padding)
c2_np = c1_np + b_np
c3_np = np.maximum(c2_np, 0)
return a_np, w_np, b_np, c1_np, c2_np, c3_np
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- fcompute, fschedule = topi.testing.dispatch(device, _conv2d_hwcn_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv2d_hwcn_implement)
t_conv = fcompute(A, W, stride, padding, dilation)
t_bias = topi.add(t_conv, B)
t_relu = topi.nn.relu(t_bias)
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import FallbackConfigEntity
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
-from topi.nn.util import get_pad_tuple
-from topi.util import get_const_tuple
-from topi.arm_cpu.conv2d_gemm import is_aarch64_arm
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
+from tvm.topi.arm_cpu.conv2d_gemm import is_aarch64_arm
from common import get_all_backend, Int8Fallback
a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
- dw_np = topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
- c_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding).astype(dtype)
+ dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
+ c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding).astype(dtype)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
- dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
- c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding).astype(dtype)
+ dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+ c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding).astype(dtype)
# convert to NCHWc
_, _, out_height, out_width = c_np.shape
a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
- dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
- c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding).astype(dtype)
+ dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+ c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding).astype(dtype)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
import tvm
from tvm import te
from tvm import autotvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
-from topi.nn.util import get_pad_tuple
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
from common import get_all_backend
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
- dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
- c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
+ dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+ c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
if add_bias:
c_np += b_np
if add_relu:
if "cudnn" in device:
fcompute, fschedule = topi.cuda.conv2d_cudnn, topi.cuda.schedule_conv2d_cudnn
else:
- fcompute, fschedule = topi.testing.get_conv2d_nchw_implement(device)
+ fcompute, fschedule = tvm.topi.testing.get_conv2d_nchw_implement(device)
with tvm.target.create(device):
if "cudnn" in device:
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
- dw_np = topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
- b_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
+ dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
+ b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
return a_np, w_np, b_np
a_np, w_np, b_np = get_ref_data()
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- fcompute, fschedule = topi.testing.dispatch(device, _conv2d_nhwc_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv2d_nhwc_implement)
B = fcompute(A, W, stride, padding, dilation, dtype)
s = fschedule([B])
ctx = tvm.context(device, 0)
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import FallbackConfigEntity
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
def verify_conv2d_1x1_nhwc_pack_int8(batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation=1):
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(adtype)
w_np = np.random.uniform(size=w_shape).astype(wdtype)
- dw_np = topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
- b_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
+ dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
+ b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
return a_np, w_np, b_np
a_np, w_np, b_np = get_ref_data()
import numpy as np
import tvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm import te
from tvm.contrib.pickle_memoize import memoize
from tvm.contrib import nvcc
-from topi.nn.util import get_pad_tuple
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
_conv2d_nhwc_tensorcore_implement = {
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
- dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
- c_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
+ dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+ c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- fcompute, fschedule = topi.testing.dispatch(device, _conv2d_nhwc_tensorcore_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv2d_nhwc_tensorcore_implement)
C = fcompute(A, W, stride, padding, dilation, 'float32')
if add_bias:
C = topi.add(C, bias)
import numpy as np
import tvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm import te
from tvm.contrib.pickle_memoize import memoize
from tvm.contrib import nvcc
-from topi.nn.util import get_pad_tuple
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
_conv2d_nhwc_winograd_tensorcore = {
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
- dw_np = topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
- c_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
+ dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
+ c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
print("Running on target: %s" % device)
with tvm.target.create(device):
if bgemm == "direct":
- fcompute, fschedule = topi.testing.dispatch(device,
+ fcompute, fschedule = tvm.topi.testing.dispatch(device,
_conv2d_nhwc_winograd_direct)
elif bgemm == "tensorcore":
- fcompute, fschedule = topi.testing.dispatch(device,
+ fcompute, fschedule = tvm.topi.testing.dispatch(device,
_conv2d_nhwc_winograd_tensorcore)
C = fcompute(A, W, stride, padding, dilation, 'float32')
if add_bias:
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
from common import get_all_backend
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
- b_np = topi.testing.conv2d_transpose_nchw_python(a_np, w_np, stride, padding, output_padding)
+ b_np = tvm.topi.testing.conv2d_transpose_nchw_python(a_np, w_np, stride, padding, output_padding)
c_np = np.maximum(b_np, 0)
return a_np, w_np, b_np, c_np
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- fcompute, fschedule = topi.testing.dispatch(device, _conv2d_transpose_nchw_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv2d_transpose_nchw_implement)
B = fcompute(A, W,
[stride_height, stride_width],
[pad_top, pad_left, pad_bottom, pad_right],
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import FallbackConfigEntity
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
-from topi.nn.util import get_pad_tuple
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
_conv2d_nchw_winograd_implement = {
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
- dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
- c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
+ dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+ c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- fcompute, fschedule = topi.testing.dispatch(device, _conv2d_nchw_winograd_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv2d_nchw_winograd_implement)
C = fcompute(A, W, stride, padding, dilation, dtype)
if add_bias:
C = topi.add(C, bias)
import tvm
from tvm import te
from tvm import autotvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
-from topi.nn.util import get_pad_tuple3d
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple3d
+from tvm.topi.util import get_const_tuple
from common import get_all_backend
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
- dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation, dilation))
- c_np = topi.testing.conv3d_ncdhw_python(a_np, dw_np, stride, padding)
+ dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation, dilation))
+ c_np = tvm.topi.testing.conv3d_ncdhw_python(a_np, dw_np, stride, padding)
if add_bias:
c_np += b_np
if add_relu:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
- fcompute, fschedule = topi.testing.dispatch(device, _conv3d_ncdhw_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv3d_ncdhw_implement)
with tvm.target.create(device):
C = fcompute(A, W, (stride, stride, stride), padding,
(dilation, dilation, dilation), dtype)
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
from common import get_all_backend
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
- dw_np = topi.testing.dilate_python(w_np, (dilation, dilation, dilation, 1, 1))
- b_np = topi.testing.conv3d_ndhwc_python(a_np, dw_np, stride, padding)
+ dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, dilation, 1, 1))
+ b_np = tvm.topi.testing.conv3d_ndhwc_python(a_np, dw_np, stride, padding)
return a_np, w_np, b_np
a_np, w_np, b_np = get_ref_data()
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
- fcompute, fschedule = topi.testing.dispatch(device, _conv3d_ndhwc_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv3d_ndhwc_implement)
with tvm.target.create(device):
B = fcompute(A, W, stride, padding, dilation, dtype)
s = fschedule([B])
import numpy as np
import tvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm import te
from tvm.contrib.pickle_memoize import memoize
from tvm.contrib import nvcc
-from topi.nn.util import get_pad_tuple3d
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple3d
+from tvm.topi.util import get_const_tuple
_conv3d_ndhwc_tensorcore_implement = {
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
- dw_np = topi.testing.dilate_python(w_np, (1, 1, 1, dilation, dilation))
- c_np = topi.testing.conv3d_ndhwc_python(a_np, dw_np, stride, padding)
+ dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, 1, dilation, dilation))
+ c_np = tvm.topi.testing.conv3d_ndhwc_python(a_np, dw_np, stride, padding)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- fcompute, fschedule = topi.testing.dispatch(device, _conv3d_ndhwc_tensorcore_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv3d_ndhwc_tensorcore_implement)
C = fcompute(A, W, stride, padding, dilation, 'float32')
if add_bias:
C = topi.add(C, bias)
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
from common import get_all_backend
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
- b_np = topi.testing.conv3d_transpose_ncdhw_python(a_np, w_np, stride, padding)
+ b_np = tvm.topi.testing.conv3d_transpose_ncdhw_python(a_np, w_np, stride, padding)
c_np = np.maximum(b_np, 0)
return a_np, w_np, b_np, c_np
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- fcompute, fschedule = topi.testing.dispatch(device, _conv3d_transpose_ncdhw_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv3d_transpose_ncdhw_implement)
B = fcompute(A, W,
[stride_depth, stride_height, stride_width],
[pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right],
import tvm
from tvm import te
from tvm import autotvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
-from topi.nn.util import get_pad_tuple3d
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple3d
+from tvm.topi.util import get_const_tuple
from common import get_all_backend
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
- dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation, dilation))
- c_np = topi.testing.conv3d_ncdhw_python(a_np, dw_np, stride, padding)
+ dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation, dilation))
+ c_np = tvm.topi.testing.conv3d_ncdhw_python(a_np, dw_np, stride, padding)
if add_bias:
c_np += b_np
if add_relu:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
- fcompute, fschedule = topi.testing.dispatch(device, _conv3d_ncdhw_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv3d_ncdhw_implement)
with tvm.target.create(device):
C = fcompute(A, W, (stride, stride, stride), padding, (dilation, dilation, dilation),
dtype)
import tvm
from tvm import te
from tvm import autotvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
from common import get_all_backend
def get_ref_data():
a_np = np.random.uniform(size=data_shape).astype(dtype)
b_np = np.random.uniform(size=data_shape).astype(dtype)
- c_np = topi.testing.correlation_nchw_python(a_np, b_np, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply)
+ c_np = tvm.topi.testing.correlation_nchw_python(a_np, b_np, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply)
return a_np, b_np, c_np
a_np, b_np, c_np = get_ref_data()
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
- fcompute, fschedule = topi.testing.dispatch(
+ fcompute, fschedule = tvm.topi.testing.dispatch(
device, _correlation_implement)
with tvm.target.create(device):
C = fcompute(A, B, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply)
import tvm
from tvm import te
from tvm import autotvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
from common import get_all_backend
offset_np = np.random.randn(*offset_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
- c_np = topi.testing.deformable_conv2d_nchw_python(a_np, offset_np, w_np, stride, padding,
+ c_np = tvm.topi.testing.deformable_conv2d_nchw_python(a_np, offset_np, w_np, stride, padding,
dilation, deformable_groups, groups)
return a_np, offset_np, w_np, c_np
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
- fcompute, fschedule = topi.testing.dispatch(device, _deformable_conv2d_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _deformable_conv2d_implement)
with tvm.target.create(device):
C = fcompute(A, Offset, W, stride, padding, dilation,
deformable_groups, groups, dtype)
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
from common import get_all_backend, Int8Fallback
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
- for fcompute, fschedule in topi.testing.dispatch(device, _dense_implement):
+ for fcompute, fschedule in tvm.topi.testing.dispatch(device, _dense_implement):
with tvm.target.create(device):
D = fcompute(A, B, C if use_bias else None)
D = topi.nn.relu(D)
"""Test code for dense tensorcore operator"""
import numpy as np
import tvm
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
from tvm import te
from tvm.contrib.pickle_memoize import memoize
from tvm.contrib import nvcc
print("skip because gpu does not support Tensor Cores")
return
print("Running on target: %s" % device)
- for fcompute, fschedule in topi.testing.dispatch(device, _dense_implement):
+ for fcompute, fschedule in tvm.topi.testing.dispatch(device, _dense_implement):
with tvm.target.create(device):
D = fcompute(A, B, C if use_bias else None)
D = topi.nn.relu(D)
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from common import get_all_backend
B = topi.nn.depth_to_space(A, block_size=block_size, layout=layout, mode=mode)
if layout == 'NHWC':
a_np = np.transpose(a_np, axes=[0, 3, 1, 2])
- b_np = topi.testing.depth_to_space_python(a_np, block_size, mode=mode)
+ b_np = tvm.topi.testing.depth_to_space_python(a_np, block_size, mode=mode)
if layout == 'NHWC':
a_np = np.transpose(a_np, axes=[0, 2, 3, 1])
b_np = np.transpose(b_np, axes=[0, 2, 3, 1])
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(B)
+ s = tvm.topi.testing.get_injective_schedule(device)(B)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx)
f = tvm.build(s, [A, B], device)
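The depth_to_space hunk above follows the check_device skeleton shared by nearly every test in this PR: skip unavailable targets, schedule under the target, build, run, compare. A self-contained sketch under the new namespace; the identity compute is a stand-in, not the op under test:

```python
import numpy as np
import tvm
from tvm import te, topi
import tvm.topi.testing

def check_device(device):
    ctx = tvm.context(device, 0)
    if not ctx.exist:
        print("Skip because %s is not enabled" % device)
        return
    print("Running on target: %s" % device)
    A = te.placeholder((4, 8), name="A", dtype="float32")
    B = topi.identity(A)  # stand-in for the op under test
    with tvm.target.create(device):
        s = tvm.topi.testing.get_injective_schedule(device)(B)
    f = tvm.build(s, [A, B], device)
    a_np = np.random.uniform(size=(4, 8)).astype("float32")
    a = tvm.nd.array(a_np, ctx)
    b = tvm.nd.array(np.zeros((4, 8), dtype="float32"), ctx)
    f(a, b)
    np.testing.assert_allclose(b.asnumpy(), a_np, rtol=1e-5)

check_device("llvm")
```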
import tvm
from tvm import te
from tvm import autotvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
import numpy as np
-from topi.util import get_const_tuple
-from topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
from tvm.contrib.pickle_memoize import memoize
from common import get_all_backend
return
print("Running on target: %s" % device)
- impl_list = topi.testing.dispatch(device, _depthwise_conv2d_nchw_implement)[:]
+ impl_list = tvm.topi.testing.dispatch(device, _depthwise_conv2d_nchw_implement)[:]
if device == "llvm" and channel_multiplier == 1 and dilation == 1:
impl_list.append((topi.x86.depthwise_conv2d_nchw, topi.x86.schedule_depthwise_conv2d_nchw))
def get_ref_data():
input_np = np.random.uniform(size=input_shape).astype(dtype)
filter_np = np.random.uniform(size=filter_shape).astype(dtype)
- dilated_filter_np = topi.testing.dilate_python(filter_np, (1, 1, dilation, dilation))
+ dilated_filter_np = tvm.topi.testing.dilate_python(filter_np, (1, 1, dilation, dilation))
scale_np = np.random.uniform(size=scale_shape).astype(dtype)
shift_np = np.random.uniform(size=shift_shape).astype(dtype)
# correctness with scipy
- depthwise_conv2d_scipy = topi.testing.depthwise_conv2d_python_nchw(
+ depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nchw(
input_np, dilated_filter_np, stride, padding)
scale_shift_scipy = np.zeros(shape=scale_shift_shape)
for c in range(in_channel * channel_multiplier):
return
print("Running on target: %s" % device)
- fcompute, fschedule = topi.testing.dispatch(device, _depthwise_conv2d_nhwc_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _depthwise_conv2d_nhwc_implement)
with tvm.target.create(device):
# declare
DepthwiseConv2d = fcompute(Input, Filter,
def get_ref_data():
input_np = np.random.uniform(size=input_shape).astype(dtype)
filter_np = np.random.uniform(size=filter_shape).astype(dtype)
- dilated_filter_np = topi.testing.dilate_python(filter_np, (dilation, dilation, 1, 1))
+ dilated_filter_np = tvm.topi.testing.dilate_python(filter_np, (dilation, dilation, 1, 1))
scale_np = np.random.uniform(size=scale_shape).astype(dtype)
shift_np = np.random.uniform(size=shift_shape).astype(dtype)
# correctness with scipy
- depthwise_conv2d_scipy = topi.testing.depthwise_conv2d_python_nhwc(
+ depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nhwc(
input_np, dilated_filter_np, stride=[stride_h, stride_w], padding=padding)
scale_shift_scipy = np.zeros(shape=scale_shift_shape)
for c in range(in_channel * channel_multiplier):
input_np = np.random.uniform(size=input_shape).astype(dtype)
filter_np = np.random.uniform(size=filter_shape).astype(dtype)
# correctness with scipy
- depthwise_conv2d_scipy = topi.testing.depthwise_conv2d_python_nchw(
+ depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nchw(
input_np, filter_np, stride, padding)
relu_scipy = np.maximum(depthwise_conv2d_scipy, 0)
return (_transform_data(input_np, ic_block),
# under the License.
import tvm
from tvm import te
-import topi
+from tvm import topi
import numpy as np
from tvm.contrib.pickle_memoize import memoize
from scipy import signal
-from topi.util import get_const_tuple
-from topi.nn.util import get_pad_tuple
-import topi.testing
-from topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_backward_input_nhwc
+from tvm.topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+import tvm.topi.testing
+from tvm.topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_backward_input_nhwc
def verify_depthwise_conv2d_back_input(batch, in_channel, in_h, channel_multiplier, filter_h, stride_h, padding_h):
def get_ref_data():
out_grad_np = np.random.uniform(size=out_grad_shape).astype(dtype)
filter_np = np.random.uniform(size=filter_shape).astype(dtype)
- dilated_out_grad_np = topi.testing.dilate_python(out_grad_np, [1, stride_h, stride_w, 1])
+ dilated_out_grad_np = tvm.topi.testing.dilate_python(out_grad_np, [1, stride_h, stride_w, 1])
# padding params in forward propagation
fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple([padding_h, padding_w], (filter_h, filter_w))
# padding params in backward propagation
# under the License.
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
import numpy as np
from tvm.contrib.pickle_memoize import memoize
from scipy import signal
-from topi.util import get_const_tuple
-from topi.nn.util import get_pad_tuple
-from topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_backward_weight_nhwc
+from tvm.topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_backward_weight_nhwc
def verify_depthwise_conv2d_back_weight(batch, in_channel, in_h, channel_multiplier, filter_h, stride_h, padding_h):
def get_ref_data():
out_grad_np = np.random.uniform(size=out_grad_shape).astype(dtype)
input_np = np.random.uniform(size=in_shape).astype(dtype)
- dilated_out_grad_np = topi.testing.dilate_python(out_grad_np, [1, stride_h, stride_w, 1])
+ dilated_out_grad_np = tvm.topi.testing.dilate_python(out_grad_np, [1, stride_h, stride_w, 1])
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple([padding_h, padding_w], (filter_h, filter_w))
padded_input_np = np.zeros((batch, in_h+pad_top+pad_bottom, in_w+pad_left+pad_right, in_channel))
# under the License.
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
import numpy as np
Output = topi.nn.dilate(Input, strides)
schedule = te.create_schedule(Output.op)
input_np = np.random.uniform(size=input_size).astype(Input.dtype)
- output_np = topi.testing.dilate_python(input_np, strides)
+ output_np = tvm.topi.testing.dilate_python(input_np, strides)
input_tvm = tvm.nd.array(input_np, ctx=ctx)
output_size = topi.util.get_const_tuple(Output.shape)
output_tvm = tvm.nd.array(np.zeros(shape=output_size).astype(Output.dtype), ctx=ctx)
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import FallbackConfigEntity
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
from common import get_all_backend, Int8Fallback
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
- dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
- c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(dtype)
+ dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+ c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(dtype)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
print("Running on target: %s" % device)
with tvm.target.create(device):
- fcompute, fschedule = topi.testing.dispatch(device, _group_conv2d_nchw_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _group_conv2d_nchw_implement)
C = fcompute(A, W, stride, padding, dilation, groups, dtype)
if add_bias:
C = topi.add(C, bias)
a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
- dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
- c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(dtype)
+ dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+ c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(dtype)
# convert to NCHWc
_, _, out_height, out_width = c_np.shape
import tvm
from tvm import te
from tvm import autotvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
import pytest
from common import get_all_backend
def get_ref_data():
a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype("uint8")
w_np = np.random.uniform(size=(num_filter, in_channel//groups, kernel, kernel)).astype("int8")
- c_np = topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding, groups)
+ c_np = tvm.topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding, groups)
return _transform_data(a_np, ic_block), _transform_kernel(w_np, ic_block, oc_block), \
_transform_data(c_np, oc_block)
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from common import get_all_backend
'Layout not supported {} '.format(layout))
B = topi.image.resize(A, (out_height, out_width), layout=layout, coordinate_transformation_mode=coord_trans, method=method)
if method == "bilinear":
- b_np = topi.testing.bilinear_resize_python(a_np, (out_height, out_width), layout, coord_trans)
+ b_np = tvm.topi.testing.bilinear_resize_python(a_np, (out_height, out_width), layout, coord_trans)
else:
scale_h = out_height / in_height
scale_w = out_width / in_width
- b_np = topi.testing.upsampling_python(a_np, (scale_h, scale_w), layout)
+ b_np = tvm.topi.testing.upsampling_python(a_np, (scale_h, scale_w), layout)
def check_device(device):
ctx = tvm.context(device, 0)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(B)
+ s = tvm.topi.testing.get_injective_schedule(device)(B)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx)
f = tvm.build(s, [A, B], device)
coordinate_transformation_mode=coordinate_transformation_mode, method=method)
if method == "trilinear":
- b_np = topi.testing.trilinear_resize3d_python(a_np, (out_depth, out_height, out_width), layout,
+ b_np = tvm.topi.testing.trilinear_resize3d_python(a_np, (out_depth, out_height, out_width), layout,
coordinate_transformation_mode)
else:
scale_d = out_depth / in_depth
scale_h = out_height / in_height
scale_w = out_width / in_width
- b_np = topi.testing.upsampling3d_python(a_np, (scale_d, scale_h, scale_w), layout)
+ b_np = tvm.topi.testing.upsampling3d_python(a_np, (scale_d, scale_h, scale_w), layout)
def check_device(device):
ctx = tvm.context(device, 0)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(B)
+ s = tvm.topi.testing.get_injective_schedule(device)(B)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx)
f = tvm.build(s, [A, B], device)
out = topi.image.crop_and_resize(images, boxes, box_ind, np_crop_size, layout=layout,
method=method, extrapolation_value=extrapolation_value)
- baseline_np = topi.testing.crop_and_resize_python(np_images, np_boxes, np_box_indices,
+ baseline_np = tvm.topi.testing.crop_and_resize_python(np_images, np_boxes, np_box_indices,
np_crop_size, layout, method,
extrapolation_value)
def check_device(device):
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(out)
+ s = tvm.topi.testing.get_injective_schedule(device)(out)
tvm_images = tvm.nd.array(np_images, ctx)
tvm_boxes = tvm.nd.array(np_boxes, ctx)
tvm_indices = tvm.nd.array(np_box_indices, ctx)
@memoize("topi.tests.test_affine_grid.verify_affine_grid")
def get_ref_data():
data_np = np.random.uniform(size=data_shape).astype(dtype)
- out_np = topi.testing.affine_grid_python(data_np, target_shape)
+ out_np = tvm.topi.testing.affine_grid_python(data_np, target_shape)
return data_np, out_np
data_np, out_np = get_ref_data()
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(out)
+ s = tvm.topi.testing.get_injective_schedule(device)(out)
tvm_data = tvm.nd.array(data_np, ctx)
tvm_out = tvm.nd.empty(out_np.shape, dtype, ctx)
f = tvm.build(s, [data, out], device)
data_np = np.random.uniform(size=data_shape).astype(dtype)
# allow grid values to be out-of-bound
grid_np = np.random.uniform(size=grid_shape, low=-1.5, high=1.5).astype(dtype)
- out_np = topi.testing.grid_sample_nchw_python(data_np, grid_np, 'bilinear')
+ out_np = tvm.topi.testing.grid_sample_nchw_python(data_np, grid_np, 'bilinear')
return data_np, grid_np, out_np
data_np, grid_np, out_np = get_ref_data()
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(out)
+ s = tvm.topi.testing.get_injective_schedule(device)(out)
tvm_data = tvm.nd.array(data_np, ctx)
tvm_grid = tvm.nd.array(grid_np, ctx)
tvm_out = tvm.nd.empty(out_np.shape, dtype, ctx)
import numpy as np
import tvm
from tvm import te
-import topi
-from topi.util import get_const_tuple
-import topi.testing
+from tvm import topi
+from tvm.topi.util import get_const_tuple
+import tvm.topi.testing
_lrn_schedule = {
"generic": topi.generic.schedule_lrn,
dtype = A.dtype
a_np = np.random.uniform(size=shape).astype(dtype)
- b_np = topi.testing.lrn_python(a_np, size, axis, bias, alpha, beta)
+ b_np = tvm.topi.testing.lrn_python(a_np, size, axis, bias, alpha, beta)
def check_device(device):
if not tvm.runtime.enabled(device):
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s_func = topi.testing.dispatch(device, _lrn_schedule)
+ s_func = tvm.topi.testing.dispatch(device, _lrn_schedule)
s = s_func([B])
ctx = tvm.context(device, 0)
a = tvm.nd.array(a_np, ctx)
from scipy import special
import tvm
from tvm import te
-import topi
-import topi.testing
-from topi import util
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi import util
from common import get_all_backend
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(B)
+ s = tvm.topi.testing.get_injective_schedule(device)(B)
foo = tvm.build(s, [A, B], device, name=name)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros_like(b_np), ctx)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(B)
+ s = tvm.topi.testing.get_injective_schedule(device)(B)
foo = tvm.build(s, [A, B], device, name="isnan")
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros_like(b_np), ctx)
print("Skip because %s is not enabled" % device)
return
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(B)
+ s = tvm.topi.testing.get_injective_schedule(device)(B)
foo = tvm.build(s, [A, B], device, name=name)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros_like(b_np), ctx)
continue
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(B)
+ s = tvm.topi.testing.get_injective_schedule(device)(B)
foo = tvm.build(s, [A, B], device)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.empty(shape=shape, dtype=to_dtype, ctx=ctx)
import numpy as np
import tvm
from tvm import te
-import topi
-from topi.util import get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
def with_tvm(lam, *args):
""" Take numpy arrays as args, convert them to TVM tensors and call `lam`.
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
from common import get_all_backend
_pool_schedule = {
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s_func = topi.testing.dispatch(device, _pool_schedule)
+ s_func = tvm.topi.testing.dispatch(device, _pool_schedule)
s = s_func(B, layout)
a = tvm.nd.array(a_np, ctx)
a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype)
out_grad_np = np.random.uniform(low=0.001, size=bshape).astype(dtype)
- pool_grad_np = topi.testing.pool_grad_nchw(a_np, out_grad_np, pool_size=(kh, kw),
+ pool_grad_np = tvm.topi.testing.pool_grad_nchw(a_np, out_grad_np, pool_size=(kh, kw),
strides=(sh, sw), padding=padding,
pool_type=pool_type, ceil_mode=ceil_mode,
count_include_pad=count_include_pad)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s_func = topi.testing.dispatch(device, _pool_grad_schedule)
+ s_func = tvm.topi.testing.dispatch(device, _pool_grad_schedule)
s = s_func(PoolGrad)
a = tvm.nd.array(a_np, ctx)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s_func = topi.testing.dispatch(device, _adaptive_pool_schedule)
+ s_func = tvm.topi.testing.dispatch(device, _adaptive_pool_schedule)
if device == "cuda":
s = s_func(B, layout)
else:
def verify_adaptive_pool(dshape, out_size, pool_type, layout="NCHW", dtype="float32"):
"""verify function of adaptive_pool"""
np_data = np.random.uniform(low=0, high=255, size=dshape).astype(dtype)
- np_out = topi.testing.adaptive_pool(np_data, out_size, pool_type, layout)
+ np_out = tvm.topi.testing.adaptive_pool(np_data, out_size, pool_type, layout)
oshape = np_out.shape
data = te.placeholder(dshape, name="data", dtype=dtype)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s_func = topi.testing.dispatch(device, _adaptive_pool_schedule)
+ s_func = tvm.topi.testing.dispatch(device, _adaptive_pool_schedule)
if device == "cuda":
s = s_func(out, layout)
else:
output_shape = [int(i) for i in B.shape]
input_np = np.random.uniform(low=0.001, size=input_shape).astype(dtype)
- ref_np = topi.testing.pool3d_ncdhw_python(input_np, kernel, stride, padding,
+ ref_np = tvm.topi.testing.pool3d_ncdhw_python(input_np, kernel, stride, padding,
output_shape, pool_type, count_include_pad, ceil_mode)
def check_device(device):
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s_func = topi.testing.dispatch(device, _pool_schedule)
+ s_func = tvm.topi.testing.dispatch(device, _pool_schedule)
s = s_func(B, layout)
a = tvm.nd.array(input_np, ctx)
output_shape = [int(i) for i in B.shape]
input_np = np.random.uniform(low=0.001, size=input_shape).astype(dtype)
- ref_np = topi.testing.pool1d_ncw_python(input_np, kernel, stride, padding,
+ ref_np = tvm.topi.testing.pool1d_ncw_python(input_np, kernel, stride, padding,
output_shape, pool_type, count_include_pad, ceil_mode)
def check_device(device):
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s_func = topi.testing.dispatch(device, _pool_schedule)
+ s_func = tvm.topi.testing.dispatch(device, _pool_schedule)
s = s_func(B, layout)
a = tvm.nd.array(input_np, ctx)
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from common import get_all_backend
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_reduce_schedule(device)(B)
+ s = tvm.topi.testing.get_reduce_schedule(device)(B)
foo = tvm.build(s, [A, B], device, name=type)
# Test
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
from tvm.contrib.nvcc import have_fp16
from common import get_all_backend
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_elemwise_schedule(device)(B)
+ s = tvm.topi.testing.get_elemwise_schedule(device)(B)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
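`get_const_tuple`, imported throughout from `tvm.topi.util` after this migration, is what lets these tests size NumPy buffers from symbolic tensor shapes. A minimal sketch of the usage:

```python
import numpy as np
from tvm import te
from tvm.topi.util import get_const_tuple

A = te.placeholder((2, 3), name="A", dtype="float32")
# Convert the IntImm shape of a tensor into a plain tuple of Python ints
# so it can be handed to NumPy.
shape = get_const_tuple(A.shape)
assert shape == (2, 3)
buf = np.zeros(shape, dtype="float32")
```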
# under the License.
"""Example code to do reorg."""
import numpy as np
-import topi
-from topi.util import get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
import tvm
from tvm import te
-import topi.testing
+import tvm.topi.testing
_reorg_schedule = {
"generic": topi.generic.schedule_reorg,
def get_ref_data_reorg():
a_np = np.random.uniform(size=a_shape).astype(dtype)
- b_np = topi.testing.reorg_python(a_np, stride)
+ b_np = tvm.topi.testing.reorg_python(a_np, stride)
return a_np, b_np
a_np, b_np = get_ref_data_reorg()
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s_func = topi.testing.dispatch(device, _reorg_schedule)
+ s_func = tvm.topi.testing.dispatch(device, _reorg_schedule)
s = s_func([B])
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
import logging
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
from common import get_all_backend
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s_func = topi.testing.dispatch(device, _softmax_schedule)
+ s_func = tvm.topi.testing.dispatch(device, _softmax_schedule)
s = s_func(B)
a = tvm.nd.array(a_np, ctx)
tvm.lower(s, [A, B], simple_mode=True)
a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
- b_np = topi.testing.softmax_python(a_np)
+ b_np = tvm.topi.testing.softmax_python(a_np)
for device in get_all_backend():
check_device(A, B, a_np, b_np, device, "softmax")
_, c, h, w = shape
a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
- b_np = topi.testing.softmax_python(a_np.transpose(0, 2, 3, 1).reshape(h*w, c))
+ b_np = tvm.topi.testing.softmax_python(a_np.transpose(0, 2, 3, 1).reshape(h*w, c))
b_np = b_np.reshape(1, h, w, c).transpose(0, 3, 1, 2)
for device in get_all_backend():
s = te.create_schedule([B.op])
tvm.lower(s, [A, B], simple_mode=True)
a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
- b_np = topi.testing.log_softmax_python(a_np)
+ b_np = tvm.topi.testing.log_softmax_python(a_np)
for device in get_all_backend():
check_device(A, B, a_np, b_np, device, "log_softmax")
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
_argsort_implement = {
"generic": (topi.argsort, topi.generic.schedule_argsort),
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- fcompute, fschedule = topi.testing.dispatch(device, _argsort_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _argsort_implement)
out = fcompute(data, axis=axis, is_ascend=is_ascend)
s = fschedule(out)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- fcompute, fschedule = topi.testing.dispatch(device, _topk_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _topk_implement)
outs = fcompute(data, k, axis, ret_type, is_ascend, dtype)
outs = outs if isinstance(outs, list) else [outs]
s = fschedule(outs)
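`_argsort_implement` above illustrates the dispatch tables that `tvm.topi.testing.dispatch` consumes: a dict from target key to a (compute, schedule) pair, with a "generic" fallback. A hypothetical standalone lookup in the same shape, not the library's implementation:

```python
from tvm import topi

# Illustrative table, mirroring _argsort_implement above.
_sketch_implement = {
    "generic": (topi.argsort, topi.generic.schedule_argsort),
}

def dispatch_sketch(target_keys, table):
    # Prefer a device-specific entry; fall back to "generic".
    for key in target_keys:
        if key in table:
            return table[key]
    return table["generic"]

fcompute, fschedule = dispatch_sketch(("gpu",), _sketch_implement)
```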
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from common import get_all_backend
B = topi.nn.space_to_depth(A, block_size=block_size, layout=layout)
if layout == 'NHWC':
a_np = np.transpose(a_np, axes=[0, 3, 1, 2])
- b_np = topi.testing.space_to_depth_python(a_np, block_size)
+ b_np = tvm.topi.testing.space_to_depth_python(a_np, block_size)
if layout == 'NHWC':
a_np = np.transpose(a_np, axes=[0, 2, 3, 1])
b_np = np.transpose(b_np, axes=[0, 2, 3, 1])
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(B)
+ s = tvm.topi.testing.get_injective_schedule(device)(B)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx)
f = tvm.build(s, [A, B], device)
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
import tvm.contrib.sparse as tvmsp
from collections import namedtuple
import time
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
- fcompute, fschedule = topi.testing.dispatch(device, _sparse_dense_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _sparse_dense_implement)
with tvm.target.create(device):
Y = fcompute(X, W_data, W_indices, W_indptr)
if use_relu:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
- fcompute, fschedule = topi.testing.dispatch(device, _sparse_dense_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _sparse_dense_implement)
with tvm.target.create(device):
Y = fcompute(X, W_data, W_indices, W_indptr)
s = fschedule([Y])
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.contrib.nvcc import have_fp16
A = te.placeholder((n, m), name='A', dtype=dtype)
B = te.compute((n, m), lambda i, j:
A[i, j] + tvm.tir.const(1, A.dtype), name='B')
- S = topi.testing.get_elemwise_schedule(device)(B)
+ S = tvm.topi.testing.get_elemwise_schedule(device)(B)
fun = tvm.build(S, [A, B], device)
np_A = tvm.nd.empty((n, m), A.dtype, ctx).copyfrom(
import pytest
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.nvcc import have_fp16
from common import get_all_backend
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_broadcast_schedule(device)(B)
+ s = tvm.topi.testing.get_broadcast_schedule(device)(B)
foo = tvm.build(s, [A, B], device, name="expand_dims")
data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = data_npy.reshape(out_shape)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_elemwise_schedule(device)(B)
+ s = tvm.topi.testing.get_elemwise_schedule(device)(B)
foo = tvm.build(s, [A, B], device, name="reinterpret")
data_npy = generator(in_shape).astype(in_dtype)
out_npy = data_npy.view(B.dtype)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(B)
+ s = tvm.topi.testing.get_injective_schedule(device)(B)
foo = tvm.build(s, [A, B], device, name="transpose")
data_npy = np.arange(np.prod(in_shape)).reshape(in_shape).astype(A.dtype)
out_npy = data_npy.transpose(axes)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(B)
+ s = tvm.topi.testing.get_injective_schedule(device)(B)
foo = tvm.build(s, [A, B], device, name="reshape")
data_npy = np.random.normal(size=src_shape).astype(A.dtype)
out_npy = np.reshape(data_npy, newshape=dst_shape)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(B)
+ s = tvm.topi.testing.get_injective_schedule(device)(B)
foo = tvm.build(s, [A, B], device, name="squeeze")
data_npy = np.random.normal(size=src_shape).astype(A.dtype)
for key in target.keys:
if key in schedule_map:
return schedule_map[key]
- return topi.testing.get_injective_schedule(target)
+ return tvm.topi.testing.get_injective_schedule(target)
tensor_l = []
for i, shape in enumerate(shapes):
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_broadcast_schedule(device)(out_tensor)
+ s = tvm.topi.testing.get_broadcast_schedule(device)(out_tensor)
foo = tvm.build(s, tensor_l + [out_tensor], device, name="stack")
data_npys = [np.random.normal(size=shape).astype(tensor_l[0].dtype) for shape in shapes]
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(tensor_l)
+ s = tvm.topi.testing.get_injective_schedule(device)(tensor_l)
foo = tvm.build(s, [A] + list(tensor_l), device, name="split")
data_npy = np.random.normal(size=src_shape).astype(A.dtype)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(B)
+ s = tvm.topi.testing.get_injective_schedule(device)(B)
foo = tvm.build(s, [A, B], device, name="reverse")
x_np = np.random.uniform(size=in_shape).astype(A.dtype)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(C)
+ s = tvm.topi.testing.get_injective_schedule(device)(C)
foo = tvm.build(s, [A, B, C], device, name="reverse_sequence")
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(out_tensor)
+ s = tvm.topi.testing.get_injective_schedule(device)(out_tensor)
foo = tvm.build(s, [A] + [indices] + [out_tensor] , device, name="take")
shape_size = 1
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(B)
+ s = tvm.topi.testing.get_injective_schedule(device)(B)
foo = tvm.build(s, [A, B], device, name="stride_slice")
x_np = np.random.uniform(size=in_shape).astype(A.dtype)
- out_npy = topi.testing.strided_slice_python(
+ out_npy = tvm.topi.testing.strided_slice_python(
x_np, begin, end, strides) + 1
data_nd = tvm.nd.array(x_np, ctx)
out_nd = tvm.nd.empty(out_npy.shape, ctx=ctx, dtype=A.dtype)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(B)
+ s = tvm.topi.testing.get_injective_schedule(device)(B)
if strides is not None:
foo = tvm.build(s, [A, V, b, e, st, B], device, name="stride_set")
v_np = np.random.uniform(size=v_shape).astype(V.dtype)
b_np = np.asarray(begin).astype('int32')
e_np = np.asarray(end).astype('int32')
- out_npy = topi.testing.strided_set_python(
+ out_npy = tvm.topi.testing.strided_set_python(
x_np, v_np, begin, end, strides) + 1
data_nd = tvm.nd.array(x_np, ctx)
v_nd = tvm.nd.array(v_np, ctx)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(out_tensor)
+ s = tvm.topi.testing.get_injective_schedule(device)(out_tensor)
func = tvm.build(s, [var_data, var_indices, out_tensor] , device, name="gather")
- out_npys = topi.testing.gather_python(data, axis, indices)
+ out_npys = tvm.topi.testing.gather_python(data, axis, indices)
data_nd = tvm.nd.array(data, ctx)
indices_nd = tvm.nd.array(indices, ctx)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(out_tensor)
+ s = tvm.topi.testing.get_injective_schedule(device)(out_tensor)
func = tvm.build(s, [A, indices, out_tensor] , device, name="take")
shape_size = 1
for i in range(len(src_shape)):
shape_size = shape_size * src_shape[i]
data_npy = np.arange(shape_size, dtype=src_dtype).reshape((src_shape))
- out_npys = topi.testing.gather_nd_python(data_npy, indices_src)
+ out_npys = tvm.topi.testing.gather_nd_python(data_npy, indices_src)
data_nd = tvm.nd.array(data_npy, ctx)
indices_nd = tvm.nd.array(indices_src, ctx)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(A)
+ s = tvm.topi.testing.get_injective_schedule(device)(A)
f = tvm.build(s, [A], device, name="arange")
a_nd = tvm.nd.empty(a_np.shape, dtype='float32', ctx=ctx)
f(a_nd)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_broadcast_schedule(device)(B)
+ s = tvm.topi.testing.get_broadcast_schedule(device)(B)
foo = tvm.build(s, [A, B], device, name="repeat")
data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = np.repeat(data_npy, repeats, axis)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_broadcast_schedule(device)(B)
+ s = tvm.topi.testing.get_broadcast_schedule(device)(B)
foo = tvm.build(s, [A, B], device, name="tile")
data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = np.tile(data_npy, reps)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_broadcast_schedule(device)(C)
+ s = tvm.topi.testing.get_broadcast_schedule(device)(C)
f = tvm.build(s, [Cond, A, B, C], device, name="where")
cond_npy = np.random.uniform(low=-1, high=1, size=in_shape).astype(dtype)
x_npy = np.random.uniform(size=in_shape).astype(dtype)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(one_hot_result)
+ s = tvm.topi.testing.get_injective_schedule(device)(one_hot_result)
fn = tvm.build(s, [indices, one_hot_result], device, name="one_hot")
indices_npy = np.random.randint(0, depth, size=indices_shape).astype(indices.dtype)
- out_npy = topi.testing.one_hot(indices_npy, on_value, off_value, depth, axis, dtype)
+ out_npy = tvm.topi.testing.one_hot(indices_npy, on_value, off_value, depth, axis, dtype)
indices_nd = tvm.nd.array(indices_npy, ctx)
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(one_hot_result.dtype), ctx)
fn(indices_nd, out_nd)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(Z)
+ s = tvm.topi.testing.get_injective_schedule(device)(Z)
foo = tvm.build(s, [X, Y, Z], device, name="unravel_index")
out_npy = np.unravel_index(x_data, y_data)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(D)
+ s = tvm.topi.testing.get_injective_schedule(device)(D)
foo = tvm.build(s, args + [D], device, name="sparse_to_dense")
ctx = tvm.context(device, 0)
if ctx.exist:
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(C)
+ s = tvm.topi.testing.get_injective_schedule(device)(C)
func = tvm.build(s, [A, C])
a = tvm.nd.array(np.array((1, 2)).astype('float32'), ctx=ctx)
c = tvm.nd.empty((1,), dtype='float32', ctx=ctx)
tvm_output = tvm.nd.empty(output.shape, ctx=ctx, dtype=B.dtype)
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(B)
+ s = tvm.topi.testing.get_injective_schedule(device)(B)
f = tvm.build(s, [A, B], device, name="layout_transform")
f(tvm_input, tvm_output)
tvm.testing.assert_allclose(tvm_output.asnumpy(), output)
tvm_output = tvm.nd.empty(output.shape, ctx=ctx, dtype=dtype)
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(B)
+ s = tvm.topi.testing.get_injective_schedule(device)(B)
f = tvm.build(s, [A, B], device, name="shape")
f(tvm_input, tvm_output)
tvm.testing.assert_allclose(tvm_output.asnumpy(), output)
C = topi.sequence_mask(A, B, axis=axis, mask_value=mask_value)
A_data = np.random.normal(0, 1, in_shape).astype(np.float32)
B_data = np.random.randint(1, max_length, (batch_size,)).astype(np.int32)
- C_gt_data = topi.testing.sequence_mask(A_data, B_data, mask_value, axis)
+ C_gt_data = tvm.topi.testing.sequence_mask(A_data, B_data, mask_value, axis)
def check_device(device):
ctx = tvm.context(device, 0)
tvm_C = tvm.nd.empty(in_shape, ctx=ctx, dtype="float32")
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(C)
+ s = tvm.topi.testing.get_injective_schedule(device)(C)
f = tvm.build(s, [A, B, C], device, name="SequenceMask")
f(tvm_A, tvm_B, tvm_C)
tvm.testing.assert_allclose(tvm_C.asnumpy(), C_gt_data)
tvm_output = tvm.nd.empty((), ctx=ctx, dtype=B.dtype)
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(B)
+ s = tvm.topi.testing.get_injective_schedule(device)(B)
f = tvm.build(s, [A, B], device, name="ndarray_size")
f(tvm_input, tvm_output)
tvm.testing.assert_allclose(tvm_output.asnumpy(), output)
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
- conv2d_compute, conv2d_schedule = topi.testing.get_conv2d_nchw_implement(device)
+ conv2d_compute, conv2d_schedule = tvm.topi.testing.get_conv2d_nchw_implement(device)
data = te.placeholder((2, 1, 2, 4), 'int8', 'data')
w = te.placeholder((3, 1, 2, 2), 'int8', 'w')
conv1 = conv2d_compute(data, w, 1, 0, 1, 'int32')
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
import math
-from topi.util import nchw_pack_layout
+from tvm.topi.util import nchw_pack_layout
from common import get_all_backend
if method == "bilinear":
out_size = (int(round(in_height*scale_h)), int(round(in_width*scale_w)))
- b_np = topi.testing.bilinear_resize_python(a_np, out_size, layout, "asymmetric")
+ b_np = tvm.topi.testing.bilinear_resize_python(a_np, out_size, layout, "asymmetric")
else:
- b_np = topi.testing.upsampling_python(a_np, (scale_h, scale_w), layout)
+ b_np = tvm.topi.testing.upsampling_python(a_np, (scale_h, scale_w), layout)
def check_device(device):
ctx = tvm.context(device, 0)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(B)
+ s = tvm.topi.testing.get_injective_schedule(device)(B)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx)
f = tvm.build(s, [A, B], device)
if method == "trilinear":
out_size = (int(round(in_depth*scale_d)), int(round(in_height*scale_h)), int(round(in_width*scale_w)))
- b_np = topi.testing.trilinear_resize3d_python(a_np, out_size, layout,
+ b_np = tvm.topi.testing.trilinear_resize3d_python(a_np, out_size, layout,
coordinate_transformation_mode="half_pixel")
else:
- b_np = topi.testing.upsampling3d_python(a_np, (scale_d, scale_h, scale_w), layout)
+ b_np = tvm.topi.testing.upsampling3d_python(a_np, (scale_d, scale_h, scale_w), layout)
def check_device(device):
ctx = tvm.context(device, 0)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_injective_schedule(device)(B)
+ s = tvm.topi.testing.get_injective_schedule(device)(B)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx)
f = tvm.build(s, [A, B], device)
# under the License.
"""Test code for util"""
-import topi
+from tvm import topi
def verify_get_shape(src_shape, src_layout, dst_layout, expect_shape):
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
-from topi.vision import ssd, non_max_suppression, get_valid_counts
+from tvm.topi.util import get_const_tuple
+from tvm.topi.vision import ssd, non_max_suppression, get_valid_counts
_get_valid_counts_implement = {
"generic": (topi.vision.get_valid_counts, topi.generic.schedule_get_valid_counts),
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- fcompute, fschedule = topi.testing.dispatch(device, _get_valid_counts_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _get_valid_counts_implement)
data = te.placeholder(dshape, name="data", dtype=dtype)
outs = fcompute(data, score_threshold, id_index, score_index)
s = fschedule(outs)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- fcompute, fschedule = topi.testing.dispatch(device, _nms_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _nms_implement)
out = fcompute(data, valid_count, indices, max_output_size, iou_threshold, force_suppress,
top_k, coord_start=coord_start, score_index=score_index, id_index=id_index,
return_indices=False)
return
print("Running on target: %s" % device)
- fcompute, fschedule = topi.testing.dispatch(device, _multibox_prior_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _multibox_prior_implement)
with tvm.target.create(device):
out = fcompute(data, sizes, ratios, steps, offsets, clip)
s = fschedule(out)
return
print("Running on target: %s" % device)
- fcompute, fschedule = topi.testing.dispatch(device, _multibox_detection_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _multibox_detection_implement)
with tvm.target.create(device):
out = fcompute(cls_prob, loc_preds, anchors)
s = fschedule(out)
a_np = np.random.uniform(size=a_shape).astype('float32')
rois_np = np.random.uniform(size=rois_shape).astype('float32') * in_size
rois_np[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi)
- b_np = topi.testing.roi_align_nchw_python(a_np, rois_np, pooled_size=pooled_size,
+ b_np = tvm.topi.testing.roi_align_nchw_python(a_np, rois_np, pooled_size=pooled_size,
spatial_scale=spatial_scale,
sample_ratio=sample_ratio)
print("Running on target: %s" % device)
with tvm.target.create(device):
- fcompute, fschedule = topi.testing.dispatch(device, _roi_align_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _roi_align_implement)
b = fcompute(a, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale,
sample_ratio=sample_ratio)
rois_np = np.random.uniform(size=rois_shape).astype('float32') * in_size
rois_np[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi).astype('float32')
- b_np = topi.testing.roi_pool_nchw_python(a_np, rois_np, pooled_size=pooled_size,
+ b_np = tvm.topi.testing.roi_pool_nchw_python(a_np, rois_np, pooled_size=pooled_size,
spatial_scale=spatial_scale)
return a_np, rois_np, b_np
with tvm.target.create(device):
b = topi.vision.rcnn.roi_pool_nchw(a, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale)
- s_func = topi.testing.dispatch(device, _roi_pool_schedule)
+ s_func = tvm.topi.testing.dispatch(device, _roi_pool_schedule)
s = s_func(b)
tvm_a = tvm.nd.array(a_np, ctx)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- fcompute, fschedule = topi.testing.dispatch(device, _proposal_implement)
+ fcompute, fschedule = tvm.topi.testing.dispatch(device, _proposal_implement)
out = fcompute(cls_prob, bbox_pred, im_info, **attrs)
s = fschedule(out)
f = tvm.build(s, [cls_prob, bbox_pred, im_info, out], device)
import threading
from tvm import te, auto_scheduler
-import topi
+from tvm import topi
@auto_scheduler.register_workload
"""Test ComputeDAG (replay, infer bound)"""
-import tvm, topi
+import tvm
+from tvm import topi
from tvm import auto_scheduler, te
from test_auto_scheduler_common import get_tiled_matmul, matmul_auto_scheduler_test
import tvm
from tvm import auto_scheduler, te
-import topi
+from tvm import topi
from test_auto_scheduler_common import matmul_auto_scheduler_test, conv2d_nchw_bn_relu
""" Test measurement and log serialization. """
import tvm
-import topi
+from tvm import topi
from tvm import te, auto_scheduler
import tempfile
import tvm
from tvm import te
from tvm.contrib import graph_runtime, util
-import topi
+from tvm import topi
def get_simplex_graph(host_dev_type, device_dev_type):
r""" Return the hand-crafted json object where only one copy node is
import tvm
from tvm import te
import numpy as np
-import topi
+from tvm import topi
import unittest
from tvm.contrib.nvcc import have_fp16, have_int8
from tvm.contrib import nvcc
dtype = 'float32'
target = 'cuda'
-
+
## Compute declaration
N = 128
A = te.placeholder((N, N), name='A')
B = te.placeholder((N, N), name='B')
k = te.reduce_axis((0, N), name='k')
C = te.compute((N, N), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name='C')
-
+
## Schedule
s = te.create_schedule([C.op])
CC = s.cache_write(C, "local")
ko, ki = s[CC].split(k, 2)
s[CC].unroll(ki)
s[CC].vectorize(j)
-
+
## Check correctness
ctx = tvm.context(target)
a_tvm = tvm.nd.array(np.ones((N, N)).astype(dtype), ctx=ctx)
# under the License.
import tvm
from tvm import te
-import topi
+from tvm import topi
from tvm.contrib import util, clang
import numpy as np
import ctypes
import tvm
from tvm import te
from ctypes import *
-import topi
+from tvm import topi
import numpy as np
tgt = "llvm"
import tvm
from tvm import te
from tvm.testing import check_numerical_grads, assert_allclose
-import topi
-from topi.util import get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
import numpy as np
# under the License.
import tvm
from tvm import te
-import topi
+from tvm import topi
import numpy as np
from tvm.contrib import nvcc
import tvm
from tvm import te
import numpy as np
-from topi.testing import conv2d_nhwc_python
+from tvm.topi.testing import conv2d_nhwc_python
from tvm.contrib import nvcc
VERIFY = True
import tvm
import numpy as np
from tvm import te
-from topi.nn.pooling import pool
+from tvm.topi.nn.pooling import pool
def test_tensor():
m = te.size_var('m')
import numpy as np
import tvm
from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
def test_operator_type_and_tags():
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_elemwise_schedule(device)(B)
+ s = tvm.topi.testing.get_elemwise_schedule(device)(B)
k_ = 2
foo = tvm.build(s, [A, B, k] + sh, device, name="tensor_scalar_" + typ)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
- s = topi.testing.get_broadcast_schedule(device)(C)
+ s = tvm.topi.testing.get_broadcast_schedule(device)(C)
foo = tvm.build(s, [A, B, C], device, name="broadcast_binary" + "_" + typ)
lhs_npy = np.random.uniform(size=lhs_shape).astype(A.dtype)
return
print("Running on target: %s" % device)
- conv2d_nchw, schedule_conv2d_nchw = topi.testing.get_conv2d_nchw_implement(device)
+ conv2d_nchw, schedule_conv2d_nchw = tvm.topi.testing.get_conv2d_nchw_implement(device)
k = 10.0
dilation = (1, 1)
a_npy = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
w_npy = np.random.uniform(size=get_const_tuple(W.shape)).astype(W.dtype)
- b_npy = topi.testing.conv2d_nchw_python(a_npy, w_npy, stride, padding)
+ b_npy = tvm.topi.testing.conv2d_nchw_python(a_npy, w_npy, stride, padding)
c_npy = np.random.uniform(size=get_const_tuple(B.shape)).astype(B.dtype)
if typ == "add":
c_npy = b_npy + k
import tvm
from tvm import te
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
def test_layout():
layout = tvm.tir.layout("NCHW16c")
# under the License.
import tvm
from tvm import te
-import topi
+from tvm import topi
from tvm.contrib import util, clang
import numpy as np
import ctypes
f(a, b)
tvm.testing.assert_allclose(
b.asnumpy(), np_func(a.asnumpy()), atol=1e-5, rtol=1e-5)
-
+
for func in test_funcs:
run_test(*func)
# specific language governing permissions and limitations
# under the License.
import tvm
-import topi
+from tvm import topi
from tvm import te
def test_multilevel_splitting_with_indivisble_factors():
- import topi
+ from tvm import topi
A = te.placeholder((130,), dtype="float32")
B = topi.nn.relu(A)
s = te.create_schedule(B.op)
set -u
export TVM_PATH=`pwd`
-export PYTHONPATH=${TVM_PATH}/python:${TVM_PATH}/topi/python
+export PYTHONPATH=${TVM_PATH}/python
export LD_LIBRARY_PATH="lib:${LD_LIBRARY_PATH:-}"
tvm_root="$(git rev-parse --show-toplevel)"
-export PYTHONPATH="$tvm_root/python":"$tvm_root/topi/python"
+export PYTHONPATH="$tvm_root/python"
# to avoid CI CPU thread throttling.
export TVM_BIND_THREADS=0
# cleanup pycache
find . -type f -path "*.pyc" | xargs rm -f
-python3 -m pytest topi/tests/python
+python3 -m pytest tests/python/topi/
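With the tests relocated under tests/python/topi/ and the extra PYTHONPATH entry gone, a plain import check is a quick way to confirm the merged namespace resolves before running the full suite; a sketch, not part of the PR itself:

```python
# Sanity check: the standalone package is gone, the sub-namespace remains.
from tvm import topi
import tvm.topi.testing

assert topi.__name__ == "tvm.topi"
```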
export TVM_HOME="$(git rev-parse --show-toplevel)"
export LD_LIBRARY_PATH="$TVM_HOME/lib:$TVM_HOME/build:${LD_LIBRARY_PATH:-}"
-export PYTHONPATH="$TVM_HOME/python":"$TVM_HOME/topi/python"
+export PYTHONPATH="$TVM_HOME/python"
export RUST_DIR="$TVM_HOME/rust"
+++ /dev/null
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# pylint: disable=invalid-name, exec-used
-"""Setup TOPI package."""
-from __future__ import absolute_import
-import os
-import shutil
-import sys
-
-from setuptools import find_packages
-from setuptools.dist import Distribution
-
-if "--inplace" in sys.argv:
- from distutils.core import setup
- from distutils.extension import Extension
-else:
- from setuptools import setup
- from setuptools.extension import Extension
-
-CURRENT_DIR = os.path.dirname(__file__)
-
-
-def get_lib_names():
- if sys.platform.startswith('win32'):
- return ['libtvm_topi.dll', 'tvm_topi.dll']
- if sys.platform.startswith('darwin'):
- return ['libtvm_topi.dylib', 'tvm_topi.dylib']
- return ['libtvm_topi.so', 'tvm_topi.so']
-
-
-def get_lib_path():
- """Get library path, name and version"""
- # We can not import `libinfo.py` in setup.py directly since __init__.py
- # Will be invoked which introduces dependences
- libinfo_py = os.path.join(CURRENT_DIR, '../../python/tvm/_ffi/libinfo.py')
- libinfo = {'__file__': libinfo_py}
- exec(compile(open(libinfo_py, "rb").read(),
- libinfo_py, 'exec'), libinfo, libinfo)
- version = libinfo['__version__']
- if not os.getenv('CONDA_BUILD'):
- lib_path = libinfo['find_lib_path'](get_lib_names())
- libs = [lib_path[0]]
- if libs[0].find("runtime") == -1:
- for name in lib_path[1:]:
- if name.find("runtime") != -1:
- libs.append(name)
- break
- else:
- libs = None
- return libs, version
-
-
-LIB_LIST, __version__ = get_lib_path()
-
-if not os.getenv('CONDA_BUILD'):
- curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
- for i, path in enumerate(LIB_LIST):
- LIB_LIST[i] = os.path.relpath(path, curr_path)
- setup_kwargs = {
- "include_package_data": True,
- "data_files": [('topi', LIB_LIST)]
- }
-else:
- setup_kwargs = {}
-
-
-include_libs = False
-wheel_include_libs = False
-if not os.getenv('CONDA_BUILD'):
- if "bdist_wheel" in sys.argv:
- wheel_include_libs = True
- else:
- include_libs = True
-
-# For bdist_wheel only
-if wheel_include_libs:
- with open("MANIFEST.in", "w") as fo:
- for path in LIB_LIST:
- shutil.copy(path, os.path.join(CURRENT_DIR, 'topi'))
- _, libname = os.path.split(path)
- fo.write("include topi/%s\n" % libname)
- setup_kwargs = {
- "include_package_data": True
- }
-
-if include_libs:
- curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
- for i, path in enumerate(LIB_LIST):
- LIB_LIST[i] = os.path.relpath(path, curr_path)
- setup_kwargs = {
- "include_package_data": True,
- "data_files": [('topi', LIB_LIST)]
- }
-
-setup(name='topi',
- version=__version__,
- description="TOPI: TVM operator index",
- install_requires=[
- "numpy",
- "decorator",
- ],
- packages=find_packages(),
- url='https://github.com/apache/incubator-tvm',
- **setup_kwargs)
-
-
-if wheel_include_libs:
- # Wheel cleanup
- os.remove("MANIFEST.in")
- for path in LIB_LIST:
- _, libname = os.path.split(path)
- os.remove("topi/%s" % libname)
+++ /dev/null
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-"""Load Lib for C++ TOPI ops and schedules"""
-import sys
-import os
-import ctypes
-import tvm._ffi
-
-from tvm._ffi import libinfo
-
-def _get_lib_names():
- if sys.platform.startswith('win32'):
- return ['libtvm_topi.dll', 'tvm_topi.dll']
- if sys.platform.startswith('darwin'):
- return ['libtvm_topi.dylib', 'tvm_topi.dylib']
- return ['libtvm_topi.so', 'tvm_topi.so']
-
-def _load_lib():
- """Load libary by searching possible path."""
- curr_path = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
- lib_search = [curr_path, os.path.dirname(curr_path)]
- lib_path = libinfo.find_lib_path(_get_lib_names(), lib_search, optional=True)
- if lib_path is None:
- return None, None
- lib = ctypes.CDLL(lib_path[0], ctypes.RTLD_GLOBAL)
- return lib, os.path.basename(lib_path[0])
-
-_LIB, _LIB_NAME = _load_lib()
-
-tvm._ffi._init_api("topi", "topi.cpp")
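This loader becomes unnecessary once the TOPI sources are compiled into libtvm itself (see the `${TOPI_SRCS}` change in CMakeLists.txt): the C++ operators register into TVM's global function table when libtvm loads, so the sub-package only needs the usual FFI binding. A sketch of what could replace it; the exact registration prefix is an assumption here:

```python
# Hypothetical loader for the merged namespace: no ctypes, no separate
# libtvm_topi search; just bind the already-registered global functions.
import tvm._ffi

tvm._ffi._init_api("topi", __name__)  # prefix "topi" assumed unchanged
```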
import tvm
from tvm import te
-import topi
-from topi.testing import conv2d_nchw_python
+from tvm import topi
+from tvm.topi.testing import conv2d_nchw_python
from tvm import autotvm
#
# .. code-block:: bash
#
-# echo 'export PYTHONPATH=/workspace/python:/workspace/topi/python:/workspace/vta/python:${PYTHONPATH}' >> ~/.bashrc
+# echo 'export PYTHONPATH=/workspace/python:/workspace/vta/python:${PYTHONPATH}' >> ~/.bashrc
# source ~/.bashrc
#################################################################
"""
import tvm
from tvm import te
-import topi
+from tvm import topi
from tvm.contrib import tedd
######################################################################
#
func = tvm.build(s, [A, B, C], target="llvm", name="gemv")
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
dtype = A.dtype
ctx = tvm.context("cpu", 0)
a = np.random.uniform(size=get_const_tuple(A.shape)).astype(dtype)
import tvm
from tvm import te
-import topi
+from tvm import topi
import numpy as np
######################################################################
__version__ = "0.1.0"
-# do not import topi when running vta.exec.rpc_server
+# do not import tvm.topi when running vta.exec.rpc_server
# to maintain minimum dependency on the board
if sys.argv[0] not in ("-c", "-m"):
from . import top
import tvm
from tvm import te
-from topi import util
+from tvm.topi import util
from tvm.relay.op.op import register_compute, register_injective_schedule
from tvm.relay.op.op import register_pattern, OpPattern
import tvm
from tvm import te
-import topi
+from tvm import topi
from tvm.relay.op import op as reg
from tvm.relay.op import strategy as _strategy
import tvm
from tvm import te
from tvm import autotvm
-import topi
+from tvm import topi
from .util import is_packed_layout
from ..environment import get_env
import tvm
from tvm import te
from tvm import autotvm
-import topi
-from topi.util import get_const_tuple
-from topi.nn.util import get_pad_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
from ..environment import get_env
import tvm
from tvm import te
from tvm import autotvm
-import topi
+from tvm import topi
from ..environment import get_env
import tvm
from tvm import te
from tvm import autotvm
-import topi
+from tvm import topi
from ..environment import get_env
# pylint: disable=len-as-condition, no-else-return, unused-argument, invalid-name
import tvm
from tvm import te
-from topi import util
+from tvm.topi import util
from .environment import get_env
import tvm
from tvm import te
from tvm import autotvm
-import topi
+from tvm import topi
import vta
import vta.testing
import tvm
from tvm import te
from tvm import autotvm
-import topi
+from tvm import topi
import vta
import vta.testing
import tvm
from tvm import te
from tvm import autotvm
-import topi
+from tvm import topi
import vta
import vta.testing
import tvm
from tvm import te
from tvm import autotvm
-import topi
+from tvm import topi
import vta
import vta.testing
import numpy as np
from PIL import Image
-import topi
+from tvm import topi
import tvm
from tvm import te
from tvm import rpc, autotvm, relay
from tvm import autotvm
from tvm.contrib import util
from tvm.contrib.pickle_memoize import memoize
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
import vta
from vta import program_fpga, reconfig_runtime
import vta.testing
a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)
w_np = np.random.randint(w_min, w_max, size=w_shape).astype(kernel.dtype)
b_np = np.random.randint(b_min, b_max, size=b_shape).astype(env.acc_dtype)
- r_np = topi.testing.conv2d_nchw_python(
+ r_np = tvm.topi.testing.conv2d_nchw_python(
a_np.astype(env.acc_dtype), w_np.astype(env.acc_dtype), (wl.hstride, wl.wstride), wl.hpad).astype(env.acc_dtype)
return a_np, w_np, b_np, r_np
from tvm import autotvm
from tvm.contrib import util
from tvm.contrib.pickle_memoize import memoize
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
import vta
from vta import program_fpga, reconfig_runtime
import vta.testing
w_min, w_max = 0 - (1 << (env.WGT_WIDTH - 1)), (1 << (env.WGT_WIDTH - 1))
a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)
w_np = np.random.randint(w_min, w_max, size=(wl.in_filter, wl.out_filter, wl.hkernel, wl.wkernel)).astype(kernel.dtype)
- r_np = topi.testing.conv2d_transpose_nchw_python(
+ r_np = tvm.topi.testing.conv2d_transpose_nchw_python(
a_np.astype(env.acc_dtype), w_np.astype(env.acc_dtype), (wl.hstride, wl.wstride), wl.hpad, (wl.o_hpad, wl.o_wpad)).astype(env.acc_dtype)
return a_np, w_np, r_np
from tvm import autotvm
from tvm.contrib import util
from tvm.contrib.pickle_memoize import memoize
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
import vta
from vta import program_fpga, reconfig_runtime
import vta.testing
from tvm import relay
from tvm import autotvm
from tvm.contrib import util
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
import vta
from vta import program_fpga, reconfig_runtime
import vta.testing
a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)
w_np = np.random.randint(w_min, w_max, size=w_shape).astype(kernel.dtype)
b_np = np.random.randint(b_min, b_max, size=b_shape).astype(env.acc_dtype)
- r_np = topi.testing.conv2d_nchw_python(
+ r_np = tvm.topi.testing.conv2d_nchw_python(
a_np.astype(env.acc_dtype), w_np.astype(env.acc_dtype),
(wl.hstride, wl.wstride), wl.hpad, wl.groups).astype(env.acc_dtype)
return a_np, w_np, b_np, r_np
import tvm
from tvm import te
import numpy as np
-import topi
+from tvm import topi
from tvm.contrib import util
import vta
import numpy as np
from PIL import Image
-import topi
+from tvm import topi
import tvm
from tvm import te
from tvm import rpc, autotvm, relay
# :align: center
# :width: 480px
-import topi
+from tvm import topi
# 2D convolution layer dimensions taken from ResNet-18 architecture
# (9th convolutional layer)
# ensure correctness.
# This library facilitates 2D convolution testing
-from topi.testing import conv2d_nchw_python
+from tvm.topi.testing import conv2d_nchw_python
# Compile the TVM module
my_conv = vta.build(s, [data, kernel, res], "ext_dev", env.target_host, name="my_conv")