const std::string json_data(&build_graph_json[0],
&build_graph_json[0] + build_graph_json_len);
tvm::runtime::Module mod_syslib =
- (*tvm::runtime::Registry::Get("module._GetSystemLib"))();
+ (*tvm::runtime::Registry::Get("runtime.SystemLib"))();
int device_type = kDLCPU;
int device_id = 0;
tvm::runtime::Module mod =
Example Plugin Module
=====================
This folder contains an example that implements a C++ module
-that can be directly loaded as TVM's DSOModule (via tvm.module.load)
+that can be directly loaded as TVM's DSOModule (via tvm.runtime.load_module)
## Guideline
def test_plugin_module():
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
- mod = tvm.module.load(os.path.join(curr_path, "lib", "plugin_module.so"))
+ mod = tvm.runtime.load_module(os.path.join(curr_path, "lib", "plugin_module.so"))
# NOTE: we need to make sure all managed resources returned
# from mod are destructed before mod is unloaded.
#
B = tvm.compute((n,), lambda *i: A(*i) + 1.0, name='B')
s = tvm.create_schedule(B.op)
def check_llvm():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
f = tvm.build(s, [A, B], "ext_dev", "llvm")
ctx = tvm.ext_dev(0)
s = tvm.create_schedule(B.op)
def check_llvm():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
f = tvm.build(s, [A, B], "llvm")
ctx = tvm.cpu(0)
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
- *
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
+ *
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// For libraries that are directly packed as a system lib and linked together with the app,
// we can directly use the runtime.SystemLib function to get the system-wide library.
LOG(INFO) << "Verify load function from system lib";
- tvm::runtime::Module mod_syslib = (*tvm::runtime::Registry::Get("module._GetSystemLib"))();
+ tvm::runtime::Module mod_syslib = (*tvm::runtime::Registry::Get("runtime.SystemLib"))();
Verify(mod_syslib, "addonesys");
return 0;
}
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
f = mod.get_function(fname)
# Use tvm.nd.array to convert numpy ndarray to tvm
# NDArray type, so that function can be invoked normally
- N = 10
+ N = 10
x = tvm.nd.array(np.arange(N, dtype=np.float32))
y = tvm.nd.array(np.zeros(N, dtype=np.float32))
# Invoke the function
f(x, y)
- np_x = x.asnumpy()
- np_y = y.asnumpy()
+ np_x = x.asnumpy()
+ np_y = y.asnumpy()
# Verify correctness of function
assert(np.all([xi+1 == yi for xi, yi in zip(np_x, np_y)]))
print("Finish verification...")
-
+
if __name__ == "__main__":
# The normal dynamic loading method for deployment
- mod_dylib = tvm.module.load("lib/test_addone_dll.so")
+ mod_dylib = tvm.runtime.load_module("lib/test_addone_dll.so")
print("Verify dynamic loading from test_addone_dll.so")
verify(mod_dylib, "addone")
# There might be methods to use the system lib way in
def main():
ctx = tvm.context('cpu', 0)
- model = tvm.module.load(osp.join(CWD, 'build', 'enclave.signed.so'))
+ model = tvm.runtime.load_module(osp.join(CWD, 'build', 'enclave.signed.so'))
inp = tvm.nd.array(np.ones((1, 3, 224, 224), dtype='float32'), ctx)
out = tvm.nd.array(np.empty((1, 1000), dtype='float32'), ctx)
model(inp, out)
+++ /dev/null
-.. Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
-.. http://www.apache.org/licenses/LICENSE-2.0
-
-.. Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied. See the License for the
- specific language governing permissions and limitations
- under the License.
-
-Framework Bridge APIs
----------------------
-
-tvm.contrib.mxnet
-~~~~~~~~~~~~~~~~~
-.. automodule:: tvm.contrib.mxnet
- :members:
specific language governing permissions and limitations
under the License.
-Additional Contrib APIs
------------------------
+tvm.contrib
+-----------
.. automodule:: tvm.contrib
tvm.contrib.cblas
:members:
+tvm.contrib.dlpack
+~~~~~~~~~~~~~~~~~~
+.. automodule:: tvm.contrib.dlpack
+ :members:
+
tvm.contrib.emscripten
~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tvm.contrib.emscripten
.. automodule:: tvm.contrib.miopen
:members:
+tvm.contrib.mxnet
+~~~~~~~~~~~~~~~~~
+.. automodule:: tvm.contrib.mxnet
+ :members:
+
tvm.contrib.ndk
~~~~~~~~~~~~~~~
.. automodule:: tvm.contrib.ndk
:members:
-
tvm.contrib.xcode
~~~~~~~~~~~~~~~~~
.. automodule:: tvm.contrib.xcode
This page contains modules that are used by developers of TVM.
Many of these APIs are PackedFuncs registered in the C++ backend.
-tvm.object
-~~~~~~~~~~
-.. automodule:: tvm.object
-.. autoclass:: tvm.object.Object
- :members:
-
-.. autofunction:: tvm.register_object
tvm.expr
~~~~~~~~
:maxdepth: 2
tvm
+ runtime
+ ndarray
intrin
tensor
schedule
build
module
error
- ndarray
container
function
autotvm
rpc
bridge
contrib
+ ffi
dev
topi
vta/index
+++ /dev/null
-.. Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
-.. http://www.apache.org/licenses/LICENSE-2.0
-
-.. Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied. See the License for the
- specific language governing permissions and limitations
- under the License.
-
-tvm.module
-----------
-.. automodule:: tvm.module
- :members:
specific language governing permissions and limitations
under the License.
-tvm.ndarray
------------
-.. automodule:: tvm.ndarray
+tvm.runtime.ndarray
+-------------------
+.. automodule:: tvm.runtime.ndarray
-.. autoclass:: tvm.ndarray.TVMContext
+.. autoclass:: tvm.nd.NDArray
:members:
+ :inherited-members:
+
-.. autoclass:: tvm.ndarray.NDArray
+.. autoclass:: tvm.runtime.TVMContext
:members:
- :inherited-members:
+.. autofunction:: tvm.context
.. autofunction:: tvm.cpu
.. autofunction:: tvm.gpu
.. autofunction:: tvm.opencl
.. autofunction:: tvm.metal
-.. autofunction:: tvm.ndarray.array
-.. autofunction:: tvm.ndarray.empty
-
-.. autofunction:: tvm.register_extension
+.. autofunction:: tvm.nd.array
+.. autofunction:: tvm.nd.empty
specific language governing permissions and limitations
under the License.
-tvm.Function
-------------
-.. autoclass:: tvm.Function
+tvm.runtime
+-----------
+
+.. automodule:: tvm.runtime
+
+
+.. autoclass:: tvm.runtime.PackedFunc
+ :members:
.. autofunction:: tvm.register_func
.. autofunction:: tvm.get_global_func
+
+
+.. autoclass:: tvm.runtime.Module
+ :members:
+
+.. autofunction:: tvm.runtime.load_module
+
+.. autofunction:: tvm.runtime.system_lib
+
+.. autofunction:: tvm.runtime.enabled
+
+
+.. autoclass:: tvm.runtime.Object
+ :members:
+
+.. autofunction:: tvm.register_object
tgt="aocl_sw_emu"
-fadd = tvm.module.load("myadd.so")
-fadd_dev = tvm.module.load("myadd.aocx")
+fadd = tvm.runtime.load_module("myadd.so")
+fadd_dev = tvm.runtime.load_module("myadd.aocx")
fadd.import_module(fadd_dev)
ctx = tvm.context(tgt, 0)
tgt="sdaccel"
-fadd = tvm.module.load("myadd.so")
+fadd = tvm.runtime.load_module("myadd.so")
if os.environ.get("XCL_EMULATION_MODE"):
- fadd_dev = tvm.module.load("myadd.xclbin")
+    fadd_dev = tvm.runtime.load_module("myadd.xclbin")
else:
- fadd_dev = tvm.module.load("myadd.awsxclbin")
+    fadd_dev = tvm.runtime.load_module("myadd.awsxclbin")
fadd.import_module(fadd_dev)
ctx = tvm.context(tgt, 0)
resnet18_lib.export_library(path_lib)
# load it back
- loaded_lib = tvm.module.load(path_lib)
+    loaded_lib = tvm.runtime.load_module(path_lib)
assert loaded_lib.type_key == "library"
assert loaded_lib.imported_modules[0].type_key == "cuda"
Deserialization
****************
-The entrance API is ``tvm.module.load``. This function
+The entry API is ``tvm.runtime.load_module``. This function
actually calls ``runtime.ModuleLoadFromFile`` under the hood. If we dig a little deeper, this is
``Module::LoadFromFile``. In our example, the file is ``deploy.so``;
according to the function logic, we will call ``runtime.module.loadfile_so`` in
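To make the chain concrete, here is a minimal sketch of the load path from the Python side (``deploy.so`` is the file from this example; everything else is standard usage):

    import tvm

    # load_module dispatches on the file extension; for a shared object it ends up
    # in the "runtime.module.loadfile_so" packed function mentioned above.
    lib = tvm.runtime.load_module("deploy.so")
    print(lib.type_key)  # "library" for a DSO module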
class CodegenC : public ExprVisitor, public CodegenCBase {
public:
- explicit CodegenC(const std::string& id) { this->ext_func_id_ = id; }
+ explicit CodegenC(const std::string& id) { this->ext_func_id_ = id; }
void VisitExpr_(const VarNode* node) { ; }
void VisitExpr_(const CallNode* call) final { ; }
/*! \brief The declaration statements of buffers. */
std::vector<std::string> buf_decl_;
/*! \brief The name and index pairs for output. */
- std::vector<std::pair<std::string, int>> out_;
+ std::vector<std::pair<std::string, int>> out_;
}
The ``CodegenC`` class inherits two classes: ``ExprVisitor`` provides the ability to traverse subgraphs, collect the required information, and generate subgraph functions such as ``gcc_0_``; ``CodegenCBase`` provides the abilities and utilities to generate wrapper functions such as ``gcc_0`` in the above example. As can be seen, we only need to implement three functions in this codegen class to make it work.
curr_node <- Process curr_node curr_node <- Put "buf_0" as an input buffer
(a) out_ = {} (b) out_ = {} (c) out_ = {("buf_0", 20)}
-
+
As we can see in the above figure, the class variable ``out_`` is empty before visiting the argument node, and it is filled with the output buffer name and size of ``arg_node`` afterwards. As a result, when we have finished visiting the argument node, we know the proper input buffer to use by looking at ``out_``. You will find out how we update ``out_`` at the end of this section as well as in the next section.
input 0 10 10
input 1 10 10
input 2 10 10
- input 3 10 10
+ input 3 10 10
add 4 inputs: 0 1 shape: 10 10
sub 5 inputs: 4 2 shape: 10 10
add 6 inputs: 5 3 shape: 10 10
TVM_REGISTER_GLOBAL("module.loadbinary_examplejson")
.set_body_typed(ExampleJsonModule::LoadFromBinary);
-The above registration means when users call ``tvm.module.load(lib_path)`` API and the exported library has an ExampleJSON stream, our ``LoadFromBinary`` will be invoked to create the same customized runtime module.
+The above registration means that when users call the ``tvm.runtime.load_module(lib_path)`` API and the exported library has an ExampleJSON stream, our ``LoadFromBinary`` will be invoked to create the same customized runtime module.
In addition, if you want to support module creation directly from an ExampleJSON file, you can also implement a simple function and register a Python API as follows:
*rv = ExampleJsonModule::Create(args[0]);
});
-It means users can manually write/modify an ExampleJSON file, and use Python API ``tvm.module.load("mysubgraph.examplejson", "examplejson")`` to construct a customized module.
+It means users can manually write/modify an ExampleJSON file, and use the Python API ``tvm.runtime.load_module("mysubgraph.examplejson", "examplejson")`` to construct a customized module.
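A minimal usage sketch of that API (the file name is the one used in this document):

    import tvm

    # The explicit format argument selects the "runtime.module.loadfile_examplejson" loader.
    mod = tvm.runtime.load_module("mysubgraph.examplejson", "examplejson")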
*******
Summary
* A runtime module class derived from ``ModuleNode`` with the following functions (for your graph representation).
- * Constructor.
+ * Constructor.
* ``GetFunction`` to generate a TVM runtime compatible ``PackedFunc``.
* ``Run`` to execute a subgraph.
* Register a runtime creation API.
* ``SaveToBinary`` and ``LoadFromBinary`` to serialize/deserialize customized runtime module.
- * Register ``LoadFromBinary`` API to support ``tvm.module.load(your_module_lib_path)``.
+   * Register ``LoadFromBinary`` API to support ``tvm.runtime.load_module(your_module_lib_path)``.
* (optional) ``Create`` to support customized runtime module construction from subgraph file in your representation.
* An annotator to annotate a user Relay program to make use of your compiler and runtime (TBA).
std::shared_ptr<PackedFunc> > import_cache_;
};
+/*!
+ * \brief Check if runtime module is enabled for target.
+ * \param target The target module name.
+ * \return Whether runtime is enabled.
+ */
+TVM_DLL bool RuntimeEnabled(const std::string& target);
+
/*! \brief namespace for constant symbols */
namespace symbol {
/*! \brief Global variable to store module context. */
from tvm.contrib import cc, util
def test_add(target_dir):
- if not tvm.module.enabled("cuda"):
+ if not tvm.runtime.enabled("cuda"):
print("skip %s because cuda is not enabled..." % __file__)
return
n = tvm.var("n")
# top-level alias
# tvm.runtime
from .runtime.object import Object
-from .runtime.packed_func import PackedFunc as Function
from .runtime.ndarray import context, cpu, gpu, opencl, cl, vulkan, metal, mtl
from .runtime.ndarray import vpi, rocm, opengl, ext_dev, micro_dev
-from .runtime import module
-from .runtime import ndarray
-# pylint: disable=reimported
from .runtime import ndarray as nd
# others
if not isinstance(handle, ObjectHandle):
handle = ObjectHandle(handle)
self.handle = handle
+
+ def same_as(self, other):
+ """Check object identity.
+
+ Parameters
+ ----------
+ other : object
+ The other object to compare against.
+
+ Returns
+ -------
+ result : bool
+ The comparison result.
+ """
+ if not isinstance(other, ObjectBase):
+ return False
+ if self.handle is None:
+ return other.handle is None
+ return self.handle.value == other.handle.value
(<PackedFuncBase>fconstructor).chandle,
kTVMObjectHandle, args, &chandle)
self.chandle = chandle
+
+ def same_as(self, other):
+ """Check object identity.
+
+ Parameters
+ ----------
+ other : object
+ The other object to compare against.
+
+ Returns
+ -------
+ result : bool
+ The comparison result.
+ """
+ if not isinstance(other, ObjectBase):
+ return False
+ return self.chandle == (<ObjectBase>other).chandle
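A rough usage sketch of the identity check added above; ``tvm.runtime.convert`` is used purely for illustration:

    import tvm.runtime

    arr = tvm.runtime.convert([1, 2, 3])
    alias = arr
    assert arr.same_as(alias)                                # same underlying handle
    assert not arr.same_as(tvm.runtime.convert([1, 2, 3]))   # equal contents, different objects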
from numbers import Integral as _Integral
import tvm._ffi
+import tvm.runtime._ffi_node_api
from tvm.runtime import convert, const, DataType
from ._ffi.base import string_types, TVMError
"""
try:
- return _api_internal._load_json(json_str)
+ return tvm.runtime._ffi_node_api.LoadJSON(json_str)
except TVMError:
json_str = json_compact.upgrade_json(json_str)
- return _api_internal._load_json(json_str)
+ return tvm.runtime._ffi_node_api.LoadJSON(json_str)
def save_json(node):
json_str : str
Saved json string.
"""
- return _api_internal._save_json(node)
+ return tvm.runtime._ffi_node_api.SaveJSON(node)
def var(name="tindex", dtype=int32):
"""
import warnings
import tvm._ffi
+import tvm.runtime
from tvm.runtime import Object, ndarray
from . import api
from . import ir_pass
from . import stmt as _stmt
from . import container
-from . import module
from . import codegen
from . import target as _target
from . import make
target_host = tar
break
if not target_host:
- target_host = "llvm" if module.enabled("llvm") else "stackvm"
+ target_host = "llvm" if tvm.runtime.enabled("llvm") else "stackvm"
fhost_all = []
device_modules = []
from tvm.runtime import Object, ObjectTypes
from tvm.runtime.container import getitem_helper
+from tvm.runtime import _ffi_node_api
from . import _api_internal
"""
def __getitem__(self, idx):
return getitem_helper(
- self, _api_internal._ArrayGetItem, len(self), idx)
+ self, _ffi_node_api.ArrayGetItem, len(self), idx)
def __len__(self):
- return _api_internal._ArraySize(self)
+ return _ffi_node_api.ArraySize(self)
@tvm._ffi.register_object
You can use convert to create a dict[Object-> Object] into a Map
"""
def __getitem__(self, k):
- return _api_internal._MapGetItem(self, k)
+ return _ffi_node_api.MapGetItem(self, k)
def __contains__(self, k):
- return _api_internal._MapCount(self, k) != 0
+ return _ffi_node_api.MapCount(self, k) != 0
def items(self):
"""Get the items from the map"""
- akvs = _api_internal._MapItems(self)
+ akvs = _ffi_node_api.MapItems(self)
return [(akvs[i], akvs[i+1]) for i in range(0, len(akvs), 2)]
def __len__(self):
- return _api_internal._MapSize(self)
+ return _ffi_node_api.MapSize(self)
@tvm._ffi.register_object
"""
def items(self):
"""Get the items from the map"""
- akvs = _api_internal._MapItems(self)
+ akvs = _ffi_node_api.MapItems(self)
return [(akvs[i].value, akvs[i+1]) for i in range(0, len(akvs), 2)]
param_bytes: bytearray
Serialized parameters.
"""
- _save_tensors = tvm.get_global_func("_save_param_dict")
+ _save_tensors = tvm.get_global_func("tvm.relay._save_param_dict")
args = []
for k, v in params.items():
# specific language governing permissions and limitations
# under the License.
"""Wrapping functions to bridge frameworks with DLPack support to TVM"""
-from .. import ndarray
+from tvm.runtime import ndarray
def convert_func(tvm_func, tensor_type, to_dlpack_func):
"""Convert a tvm function into one that accepts a tensor from another
"""MXNet bridge wrap Function MXNet's async function."""
from __future__ import absolute_import as _abs
-from .. import api, _api_internal, ndarray
-from ..module import Module
+import tvm._ffi.registry
+import tvm.runtime._ffi_api
+from tvm.runtime import Module
# pylint: disable=invalid-name
_wrap_async = None
"MXTVMBridge not exist in mxnet package,"
" please update to latest version")
- fdict = api.extract_ext_funcs(mxnet.base._LIB.MXTVMBridge)
+ fdict = tvm._ffi.registry.extract_ext_funcs(mxnet.base._LIB.MXTVMBridge)
ret = fdict["WrapAsyncCall"]
ret.is_global = True
return ret
if _wrap_async is None:
# Register extension type in first time
_wrap_async = _get_bridge_func()
- ndarray.register_extension(mxnet.nd.NDArray)
+ tvm._ffi.registry.register_extension(mxnet.nd.NDArray)
const_loc = const_loc if const_loc else []
- return _wrap_async(func, _api_internal._TVMSetStream, len(const_loc), *const_loc)
+ return _wrap_async(func, tvm.runtime._ffi_api.TVMSetStream,
+ len(const_loc), *const_loc)
# under the License.
"""Tensor and Operation class for computation declaration."""
# pylint: disable=invalid-name
-from __future__ import absolute_import as _abs
import numpy as _np
+from tvm.runtime import ndarray as _nd
+
from .. import expr as _expr
from .. import api as _api
from .. import tensor as _tensor
-from .. import ndarray as _nd
+
float32 = "float32"
itype = 'int32'
"""
# pylint: disable=missing-docstring
import tvm._ffi
-from tvm.runtime import Object, ObjectGeneric, DataType, TypeCode
+from tvm.runtime import Object, ObjectGeneric, DataType, TypeCode, const
from . import make as _make
from . import generic as _generic
return _make._OpFloorMod(self, other)
def __neg__(self):
- neg_one = _api_internal._const(-1, self.dtype)
+ neg_one = const(-1, self.dtype)
return self.__mul__(neg_one)
def __lshift__(self, other):
Parameters
----------
- c_mod : tvm.module.Module
+ c_mod : tvm.runtime.Module
module with "c" as its target backend
dev_config : Dict[str, Any]
Return
------
- micro_mod : tvm.module.Module
+    micro_mod : tvm.runtime.Module
micro module for the target device
"""
temp_dir = _util.tempdir()
c_mod.export_library(
lib_obj_path,
fcompile=cross_compiler(dev_config, LibType.OPERATOR))
- micro_mod = tvm.module.load(lib_obj_path)
+ micro_mod = tvm.runtime.load_module(lib_obj_path)
return micro_mod
def cross_compiler(dev_config, lib_type):
"""Create a cross-compile function that wraps `create_lib` for a `Binutil` instance.
- For use in `tvm.module.Module.export_library`.
+ For use in `tvm.runtime.Module.export_library`.
Parameters
----------
return _backend._CompileEngineLowerShapeFunc(self, key)
def jit(self, source_func, target=None):
- """JIT a source_func to a tvm.Function.
+ """JIT a source_func to a tvm.runtime.PackedFunc.
Parameters
----------
Returns
-------
- jited_func: tvm.Function
+ jited_func: tvm.runtime.PackedFunc
The result of jited function.
"""
key = _get_cache_key(source_func, target)
expr: relay.Expr
The expression to evaluate
- args: List[tvm.NDArray]
+ args: List[tvm.nd.NDArray]
The arguments to pass to the evaluator.
    kwargs: Dict[str, tvm.nd.NDArray]
The keyword arguments to pass to the evaluator.
Returns:
- args: List[tvm.NDArray]
+ args: List[tvm.nd.NDArray]
The new arguments with all keyword arguments placed in the correct slot.
"""
assert expr is not None
can then be saved to disk and later deserialized into a new
Executable.
- lib : :py:class:`~tvm.module.Module`
+ lib : :py:class:`~tvm.runtime.Module`
The runtime module that contains the generated code. It is
basically a library that is composed of hardware dependent code.
lib.export_library(path_lib)
with open(tmp.relpath("code.ro"), "wb") as fo:
fo.write(code)
- loaded_lib = tvm.module.load(path_lib)
+ loaded_lib = tvm.runtime.load_module(path_lib)
loaded_code = bytearray(open(tmp.relpath("code.ro"), "rb").read())
# deserialize.
des_exec = relay.vm.Executable.load_exec(loaded_code, loaded_code)
bytecode : bytearray
The binary blob representing a the Relay VM bytecode.
- lib : :py:class:`~tvm.module.Module`
+ lib : :py:class:`~tvm.runtime.Module`
The runtime module that contains the generated code.
Returns
raise TypeError("bytecode is expected to be the type of bytearray " +
"or TVMByteArray, but received {}".format(type(code)))
- if lib is not None and not isinstance(lib, tvm.module.Module):
- raise TypeError("lib is expected to be the type of tvm.module.Module" +
+ if lib is not None and not isinstance(lib, tvm.runtime.Module):
+ raise TypeError("lib is expected to be the type of tvm.runtime.Module" +
", but received {}".format(type(lib)))
return Executable(_vm.Load_Executable(bytecode, lib))
class VirtualMachine(object):
"""Relay VM runtime."""
def __init__(self, mod):
- if not isinstance(mod, (Executable, tvm.module.Module)):
+ if not isinstance(mod, (Executable, tvm.runtime.Module)):
raise TypeError("mod is expected to be the type of Executable or " +
"tvm.Module, but received {}".format(type(mod)))
m = mod.module if isinstance(mod, Executable) else mod
target_host = tgt
break
if not target_host:
- target_host = "llvm" if tvm.module.enabled("llvm") else "stackvm"
+ target_host = "llvm" if tvm.runtime.enabled("llvm") else "stackvm"
if isinstance(target_host, str):
target_host = tvm.target.create(target_host)
return target_host
mod : tvm.relay.Module
The module that optimizations will be performed on.
- params : dict of str to tvm.ndarray
- Dict of converted parameters stored in tvm.ndarray format
+ params : dict of str to tvm.nd.NDArray
+ Dict of converted parameters stored in tvm.nd.NDArray format
"""
caffe2 = Caffe2NetDef(shape, dtype)
mod : tvm.relay.Module
The relay module for compilation.
- params : dict of str to tvm.NDArray
+ params : dict of str to tvm.nd.NDArray
The parameter dict to be used by Relay.
"""
try:
mod : tvm.relay.Module
The relay module for compilation.
- params : dict of str to tvm.NDArray
+ params : dict of str to tvm.nd.NDArray
The parameter dict to be used by relay
"""
mod : tvm.relay.Module
The relay module for compilation.
- params : dict of str to tvm.NDArray
+ params : dict of str to tvm.nd.NDArray
The parameter dict to be used by Relay.
"""
def _check_model_is_tf_keras():
mod : tvm.relay.Module
The relay module for compilation
- params : dict of str to tvm.NDArray
+ params : dict of str to tvm.nd.NDArray
The parameter dict to be used by nnvm
"""
try:
mod : tvm.relay.Module
The relay module for compilation
- params : dict of str to tvm.NDArray
+ params : dict of str to tvm.nd.NDArray
The parameter dict to be used by relay
"""
try:
mod : tvm.relay.Module
The module that optimizations will be performed on.
- params : dict of str to tvm.ndarray
- Dict of converted parameters stored in tvm.ndarray format
+ params : dict of str to tvm.nd.NDArray
+ Dict of converted parameters stored in tvm.nd.NDArray format
"""
g = GraphProto()
mod, params = g.from_tensorflow(graph, layout, shape, outputs)
mod : tvm.relay.Module
The relay module for compilation.
- params : dict of str to tvm.NDArray
+ params : dict of str to tvm.nd.NDArray
The parameter dict to be used by relay
"""
try:
from tvm._ffi.base import py_str
from tvm._ffi.libinfo import find_lib_path
-from tvm.runtime.module import load as _load_module
+from tvm.runtime.module import load_module as _load_module
from tvm.contrib import util
from . import base
from . base import TrackerCode
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-"""TVM runtime."""
+"""TVM runtime namespace."""
# class exposures
from .packed_func import PackedFunc
from .object_generic import convert_to_object, convert, const
from .ndarray import context, cpu, gpu, opencl, cl, vulkan, metal, mtl
from .ndarray import vpi, rocm, opengl, ext_dev, micro_dev
-from .module import load as load_module
-
-DataType = DataType
+from .module import load_module, enabled, system_lib
--- /dev/null
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""FFI APIs for tvm.runtime"""
+import tvm._ffi
+
+# Exports functions registered via TVM_REGISTER_GLOBAL with the "runtime" prefix.
+# e.g. TVM_REGISTER_GLOBAL("runtime.ModuleLoadFromFile")
+tvm._ffi._init_api("runtime", __name__)
--- /dev/null
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# pylint: disable=invalid-name, unused-argument
+"""FFI for tvm.runtime.extra"""
+import tvm._ffi
+
+# The implementations below are default ones when the corresponding
+# functions are not available in the runtime only mode.
+# They will be overridden via _init_api to the ones registered
+# via TVM_REGISTER_GLOBAL in the compiler mode.
+def AsRepr(obj):
+    return obj.type_key() + "(" + str(obj.handle.value) + ")"
+
+
+def NodeListAttrNames(obj):
+ return lambda x: 0
+
+
+def NodeGetAttr(obj, name):
+ raise AttributeError()
+
+
+def SaveJSON(obj):
+ raise RuntimeError(
+ "Do not support object serialization in runtime only mode")
+
+
+def LoadJSON(json_str):
+ raise RuntimeError(
+ "Do not support object serialization in runtime only mode")
+
+
+# Exports functions registered via TVM_REGISTER_GLOBAL with the "node" prefix.
+# e.g. TVM_REGISTER_GLOBAL("node.AsRepr")
+tvm._ffi._init_api("node", __name__)
from tvm._ffi.libinfo import find_include_path
from .packed_func import PackedFunc, PackedFuncHandle, _set_class_module
+from . import _ffi_api
+
# profile result of time evaluator
ProfileResult = namedtuple("ProfileResult", ["mean", "results"])
Returns
-------
- f : Function
+ f : tvm.runtime.PackedFunc
The entry function if exist
"""
if self._entry:
Returns
-------
- f : Function
+ f : tvm.runtime.PackedFunc
The result function.
"""
ret_handle = PackedFuncHandle()
Parameters
----------
- module : Module
+ module : tvm.runtime.Module
The other module.
"""
check_call(_LIB.TVMModImport(self.handle, module.handle))
@property
def type_key(self):
"""Get type key of the module."""
- return _GetTypeKey(self)
+ return _ffi_api.ModuleGetTypeKey(self)
def get_source(self, fmt=""):
"""Get source code from module, if available.
source : str
The result source code.
"""
- return _GetSource(self, fmt)
+ return _ffi_api.ModuleGetSource(self, fmt)
@property
def imported_modules(self):
modules : list of Module
The module
"""
- nmod = _ImportsSize(self)
- return [_GetImport(self, i) for i in range(nmod)]
+ nmod = _ffi_api.ModuleImportsSize(self)
+ return [_ffi_api.ModuleGetImport(self, i) for i in range(nmod)]
def save(self, file_name, fmt=""):
"""Save the module to file.
See Also
--------
- Module.export_library : export the module to shared library.
+ runtime.Module.export_library : export the module to shared library.
"""
- _SaveToFile(self, file_name, fmt)
+ _ffi_api.ModuleSaveToFile(self, file_name, fmt)
def time_evaluator(self, func_name, ctx, number=10, repeat=1, min_repeat_ms=0):
"""Get an evaluator that measures time cost of running function.
Returns
-------
- ftimer : Function
+ ftimer : function
The function that takes same argument as func and returns a ProfileResult.
The ProfileResult reports `repeat` time costs in seconds.
"""
try:
- feval = _RPCTimeEvaluator(
- self, func_name, ctx.device_type, ctx.device_id, number, repeat, min_repeat_ms)
+ feval = _ffi_api.RPCTimeEvaluator(
+ self, func_name, ctx.device_type, ctx.device_id,
+ number, repeat, min_repeat_ms)
def evaluator(*args):
"""Internal wrapped evaluator."""
if self.imported_modules:
if enabled("llvm") and llvm_target_triple:
path_obj = temp.relpath("devc.o")
- m = _PackImportsToLLVM(self, is_system_lib, llvm_target_triple)
+ m = _ffi_api.ModulePackImportsToLLVM(self, is_system_lib, llvm_target_triple)
m.save(path_obj)
files.append(path_obj)
else:
path_cc = temp.relpath("devc.cc")
with open(path_cc, "w") as f:
- f.write(_PackImportsToC(self, is_system_lib))
+ f.write(_ffi_api.ModulePackImportsToC(self, is_system_lib))
files.append(path_cc)
if has_c_module:
Returns
-------
- module : Module
+ module : runtime.Module
The system-wide library module.
"""
- return _GetSystemLib()
+ return _ffi_api.SystemLib()
-def load(path, fmt=""):
+def load_module(path, fmt=""):
"""Load module from file.
Parameters
Returns
-------
- module : Module
+ module : runtime.Module
The loaded module
Note
elif path.endswith(".obj"):
fmt = "micro_dev"
# Redirect to the load API
- return _LoadFromFile(path, fmt)
+ return _ffi_api.ModuleLoadFromFile(path, fmt)
def enabled(target):
--------
The following code checks if gpu is enabled.
- >>> tvm.module.enabled("gpu")
+ >>> tvm.runtime.enabled("gpu")
"""
- return _Enabled(target)
+ return _ffi_api.RuntimeEnabled(target)
_set_class_module(Module)
-
-tvm._ffi._init_api("tvm.module", "tvm.runtime.module")
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-import
-"""Runtime NDArray api"""
+"""Runtime NDArray API"""
import ctypes
import numpy as np
import tvm._ffi
return self
def __repr__(self):
- res = "<tvm.NDArray shape={0}, {1}>\n".format(self.shape, self.context)
+ res = "<tvm.nd.NDArray shape={0}, {1}>\n".format(self.shape, self.context)
res += self.asnumpy().__repr__()
return res
Returns
-------
- ctx: TVMContext
+ ctx: tvm.runtime.TVMContext
The corresponding context.
Examples
import ctypes
from tvm._ffi.base import _FFI_MODE, _RUNTIME_ONLY, check_call, _LIB, c_str
-from .. import _api_internal
+from . import _ffi_api, _ffi_node_api
try:
# pylint: disable=wrong-import-position,unused-import
class Object(ObjectBase):
"""Base class for all tvm's runtime objects."""
def __repr__(self):
- return _api_internal._format_str(self)
+ return _ffi_node_api.AsRepr(self)
def __dir__(self):
- fnames = _api_internal._NodeListAttrNames(self)
+ fnames = _ffi_node_api.NodeListAttrNames(self)
size = fnames(-1)
return [fnames(i) for i in range(size)]
def __getattr__(self, name):
try:
- return _api_internal._NodeGetAttr(self, name)
+ return _ffi_node_api.NodeGetAttr(self, name)
except AttributeError:
raise AttributeError(
"%s has no attribute %s" % (str(type(self)), name))
def __hash__(self):
- return _api_internal._raw_ptr(self)
+ return _ffi_api.ObjectHash(self)
def __eq__(self, other):
return self.same_as(other)
def __getstate__(self):
handle = self.handle
if handle is not None:
- return {'handle': _api_internal._save_json(self)}
+ return {'handle': _ffi_node_api.SaveJSON(self)}
return {'handle': None}
def __setstate__(self, state):
- # pylint: disable=assigning-non-slot
+ # pylint: disable=assigning-non-slot, assignment-from-no-return
handle = state['handle']
if handle is not None:
json_str = handle
- other = _api_internal._load_json(json_str)
+ other = _ffi_node_api.LoadJSON(json_str)
self.handle = other.handle
other.handle = None
else:
self.handle = None
- def same_as(self, other):
- """check object identity equality"""
- if not isinstance(other, Object):
- return False
- return self.__hash__() == other.__hash__()
-
_set_class_object(Object)
from numbers import Number, Integral
from tvm._ffi.base import string_types
-from .. import _api_internal
+from . import _ffi_node_api
from .object import ObjectBase, _set_class_object_generic
from .ndarray import NDArrayBase
from .packed_func import PackedFuncBase, convert_to_tvm_func
if isinstance(value, Number):
return const(value)
if isinstance(value, string_types):
- return _api_internal._str(value)
+ return _ffi_node_api.String(value)
if isinstance(value, (list, tuple)):
value = [convert_to_object(x) for x in value]
- return _api_internal._Array(*value)
+ return _ffi_node_api.Array(*value)
if isinstance(value, dict):
vlist = []
for item in value.items():
raise ValueError("key of map must already been a container type")
vlist.append(item[0])
vlist.append(convert_to_object(item[1]))
- return _api_internal._Map(*vlist)
+ return _ffi_node_api.Map(*vlist)
if isinstance(value, ObjectGeneric):
return value.asobject()
if value is None:
if dtype is None:
dtype = _scalar_type_inference(value)
if dtype == "uint64" and value >= (1 << 63):
- return _api_internal._LargeUIntImm(
+ return _ffi_node_api.LargeUIntImm(
dtype, value & ((1 << 32) - 1), value >> 32)
- return _api_internal._const(value, dtype)
+ return _ffi_node_api._const(value, dtype)
_set_class_object_generic(ObjectGeneric, convert_to_object)
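A rough sketch of the ``uint64`` handling above (values are illustrative):

    import tvm.runtime

    # Values below 2**63 take the regular const path ...
    small = tvm.runtime.const(42, "uint64")
    # ... while anything >= 2**63 is split into low/high 32-bit halves via LargeUIntImm.
    big = tvm.runtime.const((1 << 63) + 5, "uint64")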
For example, the developer functions exposed in tvm.ir_pass are actually
C++ functions that are registered as PackedFuncs
- The following are list of common usage scenario of tvm.Function.
+    The following is a list of common usage scenarios of tvm.runtime.PackedFunc.
- Automatic exposure of C++ API into python
- To call PackedFunc from python side
Build TVM system library module. System lib is a global module that contains
self-registered functions at program startup. Users can get the module using
- :any:`tvm.module.system_lib`.
+ :any:`tvm.runtime.system_lib`.
It is useful in environments where a dynamic loading API like dlopen is banned.
The system lib will be available as long as the resulting code is linked into the program.
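A minimal sketch, assuming a kernel was compiled with a ``--system-lib`` target and
linked into the running program (``addonesys`` is the function name used by the
deployment example in this change):

>>> mod = tvm.runtime.system_lib()
>>> fadd = mod.get_function("addonesys")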
from tvm.contrib import cc
def test_add(target_dir):
- if not tvm.module.enabled("cuda"):
+ if not tvm.runtime.enabled("cuda"):
print("skip {__file__} because cuda is not enabled...".format(__file__=__file__))
return
n = tvm.var("n")
if args.pretrained:
# needs mxnet installed
from mxnet.gluon.model_zoo.vision import get_model
-
+
# if `--pretrained` is enabled, it downloads a pretrained
# resnet18 trained on imagenet1k dataset for image classification task
block = get_model('resnet18_v1', pretrained=True)
def test_build(build_dir):
""" Sanity check with random input"""
graph = open(osp.join(build_dir, "deploy_graph.json")).read()
- lib = tvm.module.load(osp.join(build_dir, "deploy_lib.so"))
+    lib = tvm.runtime.load_module(osp.join(build_dir, "deploy_lib.so"))
params = bytearray(open(osp.join(build_dir,"deploy_param.params"), "rb").read())
input_data = tvm.nd.array(np.random.uniform(size=data_shape).astype("float32"))
ctx = tvm.cpu()
+++ /dev/null
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
- /*!
- * Implementation of basic API functions
- * \file api_base.cc
- */
-#include <dmlc/memory_io.h>
-#include <tvm/tir/expr.h>
-#include <tvm/te/tensor.h>
-#include <tvm/runtime/registry.h>
-#include <tvm/node/serialization.h>
-
-namespace tvm {
-TVM_REGISTER_GLOBAL("_format_str")
-.set_body([](TVMArgs args, TVMRetValue *ret) {
- CHECK(args[0].type_code() == kTVMObjectHandle);
- std::ostringstream os;
- os << args[0].operator ObjectRef();
- *ret = os.str();
- });
-
-TVM_REGISTER_GLOBAL("_raw_ptr")
-.set_body([](TVMArgs args, TVMRetValue *ret) {
- CHECK(args[0].type_code() == kTVMObjectHandle);
- *ret = reinterpret_cast<int64_t>(args[0].value().v_handle);
- });
-
-TVM_REGISTER_GLOBAL("_save_json")
-.set_body_typed(SaveJSON);
-
-TVM_REGISTER_GLOBAL("_load_json")
-.set_body_typed(LoadJSON);
-
-TVM_REGISTER_GLOBAL("_TVMSetStream")
-.set_body_typed(TVMSetStream);
-
-TVM_REGISTER_GLOBAL("_save_param_dict")
-.set_body([](TVMArgs args, TVMRetValue *rv) {
- CHECK_EQ(args.size() % 2, 0u);
- constexpr uint64_t TVMNDArrayListMagic = 0xF7E58D4F05049CB7;
- size_t num_params = args.size() / 2;
- std::vector<std::string> names;
- names.reserve(num_params);
- std::vector<DLTensor*> arrays;
- arrays.reserve(num_params);
- for (size_t i = 0; i < num_params * 2; i += 2) {
- names.emplace_back(args[i].operator std::string());
- arrays.emplace_back(args[i + 1].operator DLTensor*());
- }
- std::string bytes;
- dmlc::MemoryStringStream strm(&bytes);
- dmlc::Stream* fo = &strm;
- uint64_t header = TVMNDArrayListMagic, reserved = 0;
- fo->Write(header);
- fo->Write(reserved);
- fo->Write(names);
- {
- uint64_t sz = static_cast<uint64_t>(arrays.size());
- fo->Write(sz);
- for (size_t i = 0; i < sz; ++i) {
- tvm::runtime::SaveDLTensor(fo, arrays[i]);
- }
- }
- TVMByteArray arr;
- arr.data = bytes.c_str();
- arr.size = bytes.length();
- *rv = arr;
- });
-
-} // namespace tvm
+++ /dev/null
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- * Implementation of API functions related to Codegen
- * \file c_api_codegen.cc
- */
-#include <tvm/tir/expr.h>
-#include <tvm/tir/expr.h>
-#include <tvm/target/codegen.h>
-#include <tvm/tir/lowered_func.h>
-#include <tvm/runtime/registry.h>
-
-namespace tvm {
-namespace codegen {
-
-TVM_REGISTER_GLOBAL("codegen._Build")
-.set_body([](TVMArgs args, TVMRetValue *ret) {
- if (args[0].IsObjectRef<tir::LoweredFunc>()) {
- *ret = Build({args[0]}, args[1]);
- } else {
- *ret = Build(args[0], args[1]);
- }
- });
-
-TVM_REGISTER_GLOBAL("module._PackImportsToC")
-.set_body_typed(PackImportsToC);
-
-TVM_REGISTER_GLOBAL("module._PackImportsToLLVM")
-.set_body_typed(PackImportsToLLVM);
-} // namespace codegen
-} // namespace tvm
* Implementation of API functions related to Higher DSL build.
* \file api_lang.cc
*/
-#include <tvm/tir/expr.h>
+#include <tvm/runtime/registry.h>
#include <tvm/tir/expr.h>
#include <tvm/te/tensor.h>
#include <tvm/te/operation.h>
#include <tvm/driver/driver_api.h>
#include <tvm/tir/data_layout.h>
-
namespace tvm {
TVM_REGISTER_GLOBAL("_min_value")
TVM_REGISTER_GLOBAL("_max_value")
.set_body_typed(max_value);
-TVM_REGISTER_GLOBAL("_const")
-.set_body([](TVMArgs args, TVMRetValue* ret) {
- if (args[0].type_code() == kDLInt) {
- *ret = tir::make_const(args[1], args[0].operator int64_t());
- } else if (args[0].type_code() == kDLFloat) {
- *ret = tir::make_const(args[1], args[0].operator double());
- } else {
- LOG(FATAL) << "only accept int or float";
- }
- });
-
-TVM_REGISTER_GLOBAL("_LargeUIntImm")
-.set_body_typed(LargeUIntImm);
-
-TVM_REGISTER_GLOBAL("_str")
-.set_body_typed(tir::StringImmNode::make);
-
-
-TVM_REGISTER_GLOBAL("_Array")
-.set_body([](TVMArgs args, TVMRetValue* ret) {
- std::vector<ObjectRef> data;
- for (int i = 0; i < args.size(); ++i) {
- if (args[i].type_code() != kTVMNullptr) {
- data.push_back(args[i].operator ObjectRef());
- } else {
- data.push_back(ObjectRef(nullptr));
- }
- }
- auto node = make_object<ArrayNode>();
- node->data = std::move(data);
- *ret = Array<ObjectRef>(node);
- });
-
-TVM_REGISTER_GLOBAL("_ArrayGetItem")
-.set_body([](TVMArgs args, TVMRetValue* ret) {
- int64_t i = args[1];
- CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
- Object* ptr = static_cast<Object*>(args[0].value().v_handle);
- CHECK(ptr->IsInstance<ArrayNode>());
- auto* n = static_cast<const ArrayNode*>(ptr);
- CHECK_LT(static_cast<size_t>(i), n->data.size())
- << "out of bound of array";
- *ret = n->data[static_cast<size_t>(i)];
- });
-
-TVM_REGISTER_GLOBAL("_ArraySize")
-.set_body([](TVMArgs args, TVMRetValue* ret) {
- CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
- Object* ptr = static_cast<Object*>(args[0].value().v_handle);
- CHECK(ptr->IsInstance<ArrayNode>());
- *ret = static_cast<int64_t>(
- static_cast<const ArrayNode*>(ptr)->data.size());
- });
-
-TVM_REGISTER_GLOBAL("_Map")
-.set_body([](TVMArgs args, TVMRetValue* ret) {
- CHECK_EQ(args.size() % 2, 0);
- if (args.size() != 0 && args[0].type_code() == kTVMStr) {
- // StrMap
- StrMapNode::ContainerType data;
- for (int i = 0; i < args.num_args; i += 2) {
- CHECK(args[i].type_code() == kTVMStr)
- << "key of str map need to be str";
- CHECK(args[i + 1].IsObjectRef<ObjectRef>())
- << "value of the map to be NodeRef";
- data.emplace(std::make_pair(args[i].operator std::string(),
- args[i + 1].operator ObjectRef()));
- }
- auto node = make_object<StrMapNode>();
- node->data = std::move(data);
- *ret = Map<ObjectRef, ObjectRef>(node);
- } else {
- // Container node.
- MapNode::ContainerType data;
- for (int i = 0; i < args.num_args; i += 2) {
- CHECK(args[i].IsObjectRef<ObjectRef>())
- << "key of str map need to be object";
- CHECK(args[i + 1].IsObjectRef<ObjectRef>())
- << "value of map to be NodeRef";
- data.emplace(std::make_pair(args[i].operator ObjectRef(),
- args[i + 1].operator ObjectRef()));
- }
- auto node = make_object<MapNode>();
- node->data = std::move(data);
- *ret = Map<ObjectRef, ObjectRef>(node);
- }
- });
-
-TVM_REGISTER_GLOBAL("_MapSize")
-.set_body([](TVMArgs args, TVMRetValue* ret) {
- CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
- Object* ptr = static_cast<Object*>(args[0].value().v_handle);
- if (ptr->IsInstance<MapNode>()) {
- auto* n = static_cast<const MapNode*>(ptr);
- *ret = static_cast<int64_t>(n->data.size());
- } else {
- CHECK(ptr->IsInstance<StrMapNode>());
- auto* n = static_cast<const StrMapNode*>(ptr);
- *ret = static_cast<int64_t>(n->data.size());
- }
- });
-
-TVM_REGISTER_GLOBAL("_MapGetItem")
-.set_body([](TVMArgs args, TVMRetValue* ret) {
- CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
- Object* ptr = static_cast<Object*>(args[0].value().v_handle);
-
- if (ptr->IsInstance<MapNode>()) {
- CHECK(args[1].type_code() == kTVMObjectHandle);
- auto* n = static_cast<const MapNode*>(ptr);
- auto it = n->data.find(args[1].operator ObjectRef());
- CHECK(it != n->data.end())
- << "cannot find the corresponding key in the Map";
- *ret = (*it).second;
- } else {
- CHECK(ptr->IsInstance<StrMapNode>());
- auto* n = static_cast<const StrMapNode*>(ptr);
- auto it = n->data.find(args[1].operator std::string());
- CHECK(it != n->data.end())
- << "cannot find the corresponding key in the Map";
- *ret = (*it).second;
- }
- });
-
-TVM_REGISTER_GLOBAL("_MapCount")
-.set_body([](TVMArgs args, TVMRetValue* ret) {
- CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
- Object* ptr = static_cast<Object*>(args[0].value().v_handle);
-
- if (ptr->IsInstance<MapNode>()) {
- auto* n = static_cast<const MapNode*>(ptr);
- CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
- *ret = static_cast<int64_t>(
- n->data.count(args[1].operator ObjectRef()));
- } else {
- CHECK(ptr->IsInstance<StrMapNode>());
- auto* n = static_cast<const StrMapNode*>(ptr);
- *ret = static_cast<int64_t>(
- n->data.count(args[1].operator std::string()));
- }
- });
-
-TVM_REGISTER_GLOBAL("_MapItems")
-.set_body([](TVMArgs args, TVMRetValue* ret) {
- CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
- Object* ptr = static_cast<Object*>(args[0].value().v_handle);
-
- if (ptr->IsInstance<MapNode>()) {
- auto* n = static_cast<const MapNode*>(ptr);
- auto rkvs = make_object<ArrayNode>();
- for (const auto& kv : n->data) {
- rkvs->data.push_back(kv.first);
- rkvs->data.push_back(kv.second);
- }
- *ret = Array<ObjectRef>(rkvs);
- } else {
- auto* n = static_cast<const StrMapNode*>(ptr);
- auto rkvs = make_object<ArrayNode>();
- for (const auto& kv : n->data) {
- rkvs->data.push_back(tir::StringImmNode::make(kv.first));
- rkvs->data.push_back(kv.second);
- }
- *ret = Array<ObjectRef>(rkvs);
- }
- });
-
TVM_REGISTER_GLOBAL("Range")
.set_body([](TVMArgs args, TVMRetValue* ret) {
if (args.size() == 1) {
--- /dev/null
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/*!
+ * Expose container API to frontend.
+ * \file src/node/container.cc
+ */
+#include <tvm/runtime/registry.h>
+#include <tvm/node/container.h>
+#include <tvm/tir/expr.h>
+
+namespace tvm {
+
+TVM_REGISTER_GLOBAL("node.Array")
+.set_body([](TVMArgs args, TVMRetValue* ret) {
+ std::vector<ObjectRef> data;
+ for (int i = 0; i < args.size(); ++i) {
+ if (args[i].type_code() != kTVMNullptr) {
+ data.push_back(args[i].operator ObjectRef());
+ } else {
+ data.push_back(ObjectRef(nullptr));
+ }
+ }
+ auto node = make_object<ArrayNode>();
+ node->data = std::move(data);
+ *ret = Array<ObjectRef>(node);
+ });
+
+TVM_REGISTER_GLOBAL("node.ArrayGetItem")
+.set_body([](TVMArgs args, TVMRetValue* ret) {
+ int64_t i = args[1];
+ CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
+ Object* ptr = static_cast<Object*>(args[0].value().v_handle);
+ CHECK(ptr->IsInstance<ArrayNode>());
+ auto* n = static_cast<const ArrayNode*>(ptr);
+ CHECK_LT(static_cast<size_t>(i), n->data.size())
+ << "out of bound of array";
+ *ret = n->data[static_cast<size_t>(i)];
+ });
+
+TVM_REGISTER_GLOBAL("node.ArraySize")
+.set_body([](TVMArgs args, TVMRetValue* ret) {
+ CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
+ Object* ptr = static_cast<Object*>(args[0].value().v_handle);
+ CHECK(ptr->IsInstance<ArrayNode>());
+ *ret = static_cast<int64_t>(
+ static_cast<const ArrayNode*>(ptr)->data.size());
+ });
+
+TVM_REGISTER_GLOBAL("node.Map")
+.set_body([](TVMArgs args, TVMRetValue* ret) {
+ CHECK_EQ(args.size() % 2, 0);
+ if (args.size() != 0 && args[0].type_code() == kTVMStr) {
+ // StrMap
+ StrMapNode::ContainerType data;
+ for (int i = 0; i < args.num_args; i += 2) {
+ CHECK(args[i].type_code() == kTVMStr)
+ << "key of str map need to be str";
+ CHECK(args[i + 1].IsObjectRef<ObjectRef>())
+ << "value of the map to be NodeRef";
+ data.emplace(std::make_pair(args[i].operator std::string(),
+ args[i + 1].operator ObjectRef()));
+ }
+ auto node = make_object<StrMapNode>();
+ node->data = std::move(data);
+ *ret = Map<ObjectRef, ObjectRef>(node);
+ } else {
+ // Container node.
+ MapNode::ContainerType data;
+ for (int i = 0; i < args.num_args; i += 2) {
+ CHECK(args[i].IsObjectRef<ObjectRef>())
+ << "key of str map need to be object";
+ CHECK(args[i + 1].IsObjectRef<ObjectRef>())
+ << "value of map to be NodeRef";
+ data.emplace(std::make_pair(args[i].operator ObjectRef(),
+ args[i + 1].operator ObjectRef()));
+ }
+ auto node = make_object<MapNode>();
+ node->data = std::move(data);
+ *ret = Map<ObjectRef, ObjectRef>(node);
+ }
+ });
+
+
+TVM_REGISTER_GLOBAL("node.MapSize")
+.set_body([](TVMArgs args, TVMRetValue* ret) {
+ CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
+ Object* ptr = static_cast<Object*>(args[0].value().v_handle);
+ if (ptr->IsInstance<MapNode>()) {
+ auto* n = static_cast<const MapNode*>(ptr);
+ *ret = static_cast<int64_t>(n->data.size());
+ } else {
+ CHECK(ptr->IsInstance<StrMapNode>());
+ auto* n = static_cast<const StrMapNode*>(ptr);
+ *ret = static_cast<int64_t>(n->data.size());
+ }
+ });
+
+TVM_REGISTER_GLOBAL("node.MapGetItem")
+.set_body([](TVMArgs args, TVMRetValue* ret) {
+ CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
+ Object* ptr = static_cast<Object*>(args[0].value().v_handle);
+
+ if (ptr->IsInstance<MapNode>()) {
+ CHECK(args[1].type_code() == kTVMObjectHandle);
+ auto* n = static_cast<const MapNode*>(ptr);
+ auto it = n->data.find(args[1].operator ObjectRef());
+ CHECK(it != n->data.end())
+ << "cannot find the corresponding key in the Map";
+ *ret = (*it).second;
+ } else {
+ CHECK(ptr->IsInstance<StrMapNode>());
+ auto* n = static_cast<const StrMapNode*>(ptr);
+ auto it = n->data.find(args[1].operator std::string());
+ CHECK(it != n->data.end())
+ << "cannot find the corresponding key in the Map";
+ *ret = (*it).second;
+ }
+ });
+
+TVM_REGISTER_GLOBAL("node.MapCount")
+.set_body([](TVMArgs args, TVMRetValue* ret) {
+ CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
+ Object* ptr = static_cast<Object*>(args[0].value().v_handle);
+
+ if (ptr->IsInstance<MapNode>()) {
+ auto* n = static_cast<const MapNode*>(ptr);
+ CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
+ *ret = static_cast<int64_t>(
+ n->data.count(args[1].operator ObjectRef()));
+ } else {
+ CHECK(ptr->IsInstance<StrMapNode>());
+ auto* n = static_cast<const StrMapNode*>(ptr);
+ *ret = static_cast<int64_t>(
+ n->data.count(args[1].operator std::string()));
+ }
+ });
+
+TVM_REGISTER_GLOBAL("node.MapItems")
+.set_body([](TVMArgs args, TVMRetValue* ret) {
+ CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
+ Object* ptr = static_cast<Object*>(args[0].value().v_handle);
+
+ if (ptr->IsInstance<MapNode>()) {
+ auto* n = static_cast<const MapNode*>(ptr);
+ auto rkvs = make_object<ArrayNode>();
+ for (const auto& kv : n->data) {
+ rkvs->data.push_back(kv.first);
+ rkvs->data.push_back(kv.second);
+ }
+ *ret = Array<ObjectRef>(rkvs);
+ } else {
+ auto* n = static_cast<const StrMapNode*>(ptr);
+ auto rkvs = make_object<ArrayNode>();
+ for (const auto& kv : n->data) {
+ rkvs->data.push_back(tir::StringImmNode::make(kv.first));
+ rkvs->data.push_back(kv.second);
+ }
+ *ret = Array<ObjectRef>(rkvs);
+ }
+ });
+} // namespace tvm
}
-TVM_REGISTER_GLOBAL("_NodeGetAttr")
+TVM_REGISTER_GLOBAL("node.NodeGetAttr")
.set_body(NodeGetAttr);
-TVM_REGISTER_GLOBAL("_NodeListAttrNames")
+TVM_REGISTER_GLOBAL("node.NodeListAttrNames")
.set_body(NodeListAttrNames);
TVM_REGISTER_GLOBAL("make._Node")
.set_body(MakeNode);
-
} // namespace tvm
* Printer utilities
* \file node/repr_printer.cc
*/
+#include <tvm/runtime/registry.h>
#include <tvm/node/repr_printer.h>
namespace tvm {
void Dump(const ObjectRef& n) {
std::cerr << n << "\n";
}
+
+TVM_REGISTER_GLOBAL("node.AsRepr")
+.set_body_typed([](runtime::ObjectRef obj) {
+ std::ostringstream os;
+ os << obj;
+ return os.str();
+});
} // namespace tvm
*/
#include <dmlc/json.h>
#include <dmlc/memory_io.h>
-
+#include <tvm/runtime/registry.h>
#include <tvm/runtime/ndarray.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/node/container.h>
}
return ObjectRef(nodes.at(jgraph.root));
}
+
+TVM_REGISTER_GLOBAL("node.SaveJSON")
+.set_body_typed(SaveJSON);
+
+TVM_REGISTER_GLOBAL("node.LoadJSON")
+.set_body_typed(LoadJSON);
} // namespace tvm
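A rough round-trip sketch for the two globals registered above; ``tvm.save_json`` and ``tvm.load_json`` (shown earlier in ``api.py``) are the user-facing wrappers:

    import tvm
    import tvm.runtime

    node = tvm.runtime.convert([1, 2, 3])
    json_str = tvm.save_json(node)      # dispatches to node.SaveJSON
    restored = tvm.load_json(json_str)  # dispatches to node.LoadJSON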
}
// Create a CSourceModule
- const auto* pf = runtime::Registry::Get("module.csource_module_create");
+ const auto* pf = runtime::Registry::Get("runtime.CSourceModuleCreate");
CHECK(pf != nullptr) << "Cannot find csource module to create the external runtime module";
return (*pf)(code_stream_.str(), "cc");
}
}
// Create a CSourceModule
- const auto* pf = runtime::Registry::Get("module.csource_module_create");
+ const auto* pf = runtime::Registry::Get("runtime.CSourceModuleCreate");
CHECK(pf != nullptr) << "Cannot find csource module to create the external runtime module";
return (*pf)(code_stream_.str(), "cc");
}
DeviceAPIManager::Get(ctx)->GetAttr(ctx, kind, ret);
}
});
+
+
+TVM_REGISTER_GLOBAL("runtime.TVMSetStream")
+.set_body_typed(TVMSetStream);
std::vector<std::string> op_id_;
};
-TVM_REGISTER_GLOBAL("module.loadfile_examplejson")
+TVM_REGISTER_GLOBAL("runtime.module.loadfile_examplejson")
.set_body([](TVMArgs args, TVMRetValue* rv) {
*rv = ExampleJsonModule::Create(args[0]);
});
-TVM_REGISTER_GLOBAL("module.loadbinary_examplejson")
+TVM_REGISTER_GLOBAL("runtime.module.loadbinary_examplejson")
.set_body_typed(ExampleJsonModule::LoadFromBinary);
} // namespace runtime
return CUDAModuleCreate(data, fmt, fmap, std::string());
}
-TVM_REGISTER_GLOBAL("module.loadfile_cubin")
+TVM_REGISTER_GLOBAL("runtime.module.loadfile_cubin")
.set_body_typed(CUDAModuleLoadFile);
-TVM_REGISTER_GLOBAL("module.loadfile_ptx")
+TVM_REGISTER_GLOBAL("runtime.module.loadfile_ptx")
.set_body_typed(CUDAModuleLoadFile);
-TVM_REGISTER_GLOBAL("module.loadbinary_cuda")
+TVM_REGISTER_GLOBAL("runtime.module.loadbinary_cuda")
.set_body_typed(CUDAModuleLoadBinary);
} // namespace runtime
} // namespace tvm
#endif
};
-TVM_REGISTER_GLOBAL("module.loadfile_so")
+TVM_REGISTER_GLOBAL("runtime.module.loadfile_so")
.set_body([](TVMArgs args, TVMRetValue* rv) {
auto n = make_object<DSOLibrary>();
n->Init(args[0]);
CHECK(stream->Read(&import_tree_row_ptr));
CHECK(stream->Read(&import_tree_child_indices));
} else {
- std::string fkey = "module.loadbinary_" + tkey;
+ std::string fkey = "runtime.module.loadbinary_" + tkey;
const PackedFunc* f = Registry::Get(fkey);
CHECK(f != nullptr)
<< "Loader of " << tkey << "("
return MetalModuleCreate(data, fmt, fmap, "");
}
-TVM_REGISTER_GLOBAL("module.loadfile_metal")
+TVM_REGISTER_GLOBAL("runtime.module.loadfile_metal")
.set_body_typed(MetalModuleLoadFile);
-TVM_REGISTER_GLOBAL("module.loadbinary_metal")
+TVM_REGISTER_GLOBAL("runtime.module.loadbinary_metal")
.set_body_typed(MetalModuleLoadBinary);
} // namespace runtime
} // namespace tvm
}
// register loadfile function to load module from Python frontend
-TVM_REGISTER_GLOBAL("module.loadfile_micro_dev")
+TVM_REGISTER_GLOBAL("runtime.module.loadfile_micro_dev")
.set_body([](TVMArgs args, TVMRetValue* rv) {
auto n = make_object<MicroModuleNode>();
n->InitMicroModule(args[0]);
if (fmt == "dll" || fmt == "dylib" || fmt == "dso") {
fmt = "so";
}
- std::string load_f_name = "module.loadfile_" + fmt;
+ std::string load_f_name = "runtime.module.loadfile_" + fmt;
const PackedFunc* f = Registry::Get(load_f_name);
CHECK(f != nullptr)
<< "Loader of " << format << "("
return runtime::Registry::Get(f_name) != nullptr;
}
-TVM_REGISTER_GLOBAL("module._Enabled")
-.set_body([](TVMArgs args, TVMRetValue *ret) {
- *ret = RuntimeEnabled(args[0]);
- });
-
-TVM_REGISTER_GLOBAL("module._GetSource")
-.set_body([](TVMArgs args, TVMRetValue *ret) {
- *ret = args[0].operator Module()->GetSource(args[1]);
- });
-
-TVM_REGISTER_GLOBAL("module._ImportsSize")
-.set_body([](TVMArgs args, TVMRetValue *ret) {
- *ret = static_cast<int64_t>(
- args[0].operator Module()->imports().size());
- });
-
-TVM_REGISTER_GLOBAL("module._GetImport")
-.set_body([](TVMArgs args, TVMRetValue *ret) {
- *ret = args[0].operator Module()->
- imports().at(args[1].operator int());
- });
-
-TVM_REGISTER_GLOBAL("module._GetTypeKey")
-.set_body([](TVMArgs args, TVMRetValue *ret) {
- *ret = std::string(args[0].operator Module()->type_key());
- });
-
-TVM_REGISTER_GLOBAL("module._LoadFromFile")
-.set_body([](TVMArgs args, TVMRetValue *ret) {
- *ret = Module::LoadFromFile(args[0], args[1]);
- });
-
-TVM_REGISTER_GLOBAL("module._SaveToFile")
-.set_body([](TVMArgs args, TVMRetValue *ret) {
- args[0].operator Module()->
- SaveToFile(args[1], args[2]);
- });
+TVM_REGISTER_GLOBAL("runtime.RuntimeEnabled")
+.set_body_typed(RuntimeEnabled);
+
+TVM_REGISTER_GLOBAL("runtime.ModuleGetSource")
+.set_body_typed([](Module mod, std::string fmt) {
+ return mod->GetSource(fmt);
+});
+
+TVM_REGISTER_GLOBAL("runtime.ModuleImportsSize")
+.set_body_typed([](Module mod) {
+ return static_cast<int64_t>(mod->imports().size());
+});
+
+TVM_REGISTER_GLOBAL("runtime.ModuleGetImport")
+.set_body_typed([](Module mod, int index) {
+ return mod->imports().at(index);
+});
+
+TVM_REGISTER_GLOBAL("runtime.ModuleGetTypeKey")
+.set_body_typed([](Module mod) {
+ return std::string(mod->type_key());
+});
+
+TVM_REGISTER_GLOBAL("runtime.ModuleLoadFromFile")
+.set_body_typed(Module::LoadFromFile);
+
+TVM_REGISTER_GLOBAL("runtime.ModuleSaveToFile")
+.set_body_typed([](Module mod, std::string name, std::string fmt) {
+ mod->SaveToFile(name, fmt);
+});
} // namespace runtime
} // namespace tvm
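With the module helpers now under the "runtime." prefix, the Python tvm.runtime.Module wrapper resolves them from the same registry. A minimal sketch calling two of them directly (the library path is a placeholder for a previously exported module):

import tvm

# "deploy_lib.so" is a placeholder for a previously exported library.
mod = tvm.runtime.load_module("deploy_lib.so")
# The same helpers the Python Module wrapper now resolves under "runtime.":
type_key = tvm.get_global_func("runtime.ModuleGetTypeKey")(mod)
n_imports = tvm.get_global_func("runtime.ModuleImportsSize")(mod)
print(type_key, n_imports)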
* \brief Object type management system.
*/
#include <dmlc/logging.h>
+#include <tvm/runtime/registry.h>
#include <tvm/runtime/object.h>
#include <mutex>
#include <string>
return TypeContext::Global()->TypeKey2Index(key);
}
+
+TVM_REGISTER_GLOBAL("runtime.ObjectHash")
+.set_body_typed([](ObjectRef obj) {
+ return static_cast<int64_t>(ObjectHash()(obj));
+});
} // namespace runtime
} // namespace tvm
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
- *
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
+ *
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
return AOCLModuleCreate(data, fmt, fmap, std::string());
}
-TVM_REGISTER_GLOBAL("module.loadfile_aocx")
+TVM_REGISTER_GLOBAL("runtime.module.loadfile_aocx")
.set_body_typed(AOCLModuleLoadFile);
} // namespace runtime
return OpenCLModuleCreate(data, fmt, fmap, std::string());
}
-TVM_REGISTER_GLOBAL("module.loadfile_cl")
+TVM_REGISTER_GLOBAL("runtime.module.loadfile_cl")
.set_body_typed(OpenCLModuleLoadFile);
-TVM_REGISTER_GLOBAL("module.loadfile_clbin")
+TVM_REGISTER_GLOBAL("runtime.module.loadfile_clbin")
.set_body_typed(OpenCLModuleLoadFile);
-TVM_REGISTER_GLOBAL("module.loadbinary_opencl")
+TVM_REGISTER_GLOBAL("runtime.module.loadbinary_opencl")
.set_body_typed(OpenCLModuleLoadBinary);
} // namespace runtime
} // namespace tvm
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
- *
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
+ *
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
return SDAccelModuleCreate(data, fmt, fmap, std::string());
}
-TVM_REGISTER_GLOBAL("module.loadfile_xclbin")
+TVM_REGISTER_GLOBAL("runtime.module.loadfile_xclbin")
.set_body_typed(SDAccelModuleLoadFile);
-TVM_REGISTER_GLOBAL("module.loadfile_awsxclbin")
+TVM_REGISTER_GLOBAL("runtime.module.loadfile_awsxclbin")
.set_body_typed(SDAccelModuleLoadFile);
} // namespace runtime
} // namespace tvm
return OpenGLModuleCreate(FromJSON(data), fmt, fmap);
}
-TVM_REGISTER_GLOBAL("module.loadfile_gl")
+TVM_REGISTER_GLOBAL("runtime.module.loadfile_gl")
.set_body([](TVMArgs args, TVMRetValue* rv) {
*rv = OpenGLModuleLoadFile(args[0], args[1]);
});
-TVM_REGISTER_GLOBAL("module.loadfile_glbin")
+TVM_REGISTER_GLOBAL("runtime.module.loadfile_glbin")
.set_body([](TVMArgs args, TVMRetValue* rv) {
*rv = OpenGLModuleLoadFile(args[0], args[1]);
});
-TVM_REGISTER_GLOBAL("module.loadbinary_opengl")
+TVM_REGISTER_GLOBAL("runtime.module.loadbinary_opengl")
.set_body([](TVMArgs args, TVMRetValue* rv) {
*rv = OpenGLModuleLoadBinary(args[0]);
});
}
-TVM_REGISTER_GLOBAL("module.loadbinary_hsaco")
+TVM_REGISTER_GLOBAL("runtime.module.loadbinary_hsaco")
.set_body_typed(ROCMModuleLoadBinary);
-TVM_REGISTER_GLOBAL("module.loadbinary_hip")
+TVM_REGISTER_GLOBAL("runtime.module.loadbinary_hip")
.set_body_typed(ROCMModuleLoadBinary);
-TVM_REGISTER_GLOBAL("module.loadfile_hsaco")
+TVM_REGISTER_GLOBAL("runtime.module.loadfile_hsaco")
.set_body_typed(ROCMModuleLoadFile);
-TVM_REGISTER_GLOBAL("module.loadfile_hip")
+TVM_REGISTER_GLOBAL("runtime.module.loadfile_hip")
.set_body_typed(ROCMModuleLoadFile);
} // namespace runtime
} // namespace tvm
return Module(n);
}
-TVM_REGISTER_GLOBAL("module._RPCTimeEvaluator")
+TVM_REGISTER_GLOBAL("runtime.RPCTimeEvaluator")
.set_body([](TVMArgs args, TVMRetValue* rv) {
Module m = args[0];
std::string tkey = m->type_key();
TVM_REGISTER_ENCLAVE_FUNC("__tvm_main__")
.set_body([](TVMArgs args, TVMRetValue* rv) {
- Module mod = (*Registry::Get("module._GetSystemLib"))();
+ Module mod = (*Registry::Get("runtime.SystemLib"))();
mod.GetFunction("default_function").CallPacked(args, rv);
});
} // extern "C"
} // namespace sgx
-TVM_REGISTER_GLOBAL("module.loadfile_sgx")
+TVM_REGISTER_GLOBAL("runtime.module.loadfile_sgx")
.set_body([](TVMArgs args, TVMRetValue* rv) {
std::shared_ptr<SGXModuleNode> node = std::make_shared<SGXModuleNode>();
node->Init(args[0]);
for (uint64_t i = 0; i < num_imports; ++i) {
std::string tkey;
CHECK(strm->Read(&tkey));
- std::string fkey = "module.loadbinary_" + tkey;
+ std::string fkey = "runtime.module.loadbinary_" + tkey;
const PackedFunc* f = Registry::Get(fkey);
CHECK(f != nullptr)
<< "Loader of " << tkey << "("
return StackVMModuleNode::Create(fmap, entry_func);
}
-TVM_REGISTER_GLOBAL("module.loadfile_stackvm")
+TVM_REGISTER_GLOBAL("runtime.module.loadfile_stackvm")
.set_body_typed(StackVMModuleNode::LoadFromFile);
} // namespace runtime
std::unordered_map<std::string, void*> tbl_;
};
-TVM_REGISTER_GLOBAL("module._GetSystemLib")
-.set_body([](TVMArgs args, TVMRetValue* rv) {
+TVM_REGISTER_GLOBAL("runtime.SystemLib")
+.set_body_typed([]() {
static auto mod = CreateModuleFromLibrary(
SystemLibrary::Global());
- *rv = mod;
- });
+ return mod;
+});
} // namespace runtime
} // namespace tvm
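The Python side reaches this through tvm.runtime.system_lib(), as in the system-lib tests later in this patch. A minimal sketch (library path and function name are placeholders):

import ctypes
import tvm

# "deploy_syslib.so" and "addone" are placeholders; the library must have
# been built with --system-lib so loading it registers its symbols.
dll = ctypes.CDLL("deploy_syslib.so")      # triggers system-lib registration
syslib = tvm.runtime.system_lib()          # backed by "runtime.SystemLib" above
fadd = syslib["addone"]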
return VulkanModuleCreate(smap, fmap, "");
}
-TVM_REGISTER_GLOBAL("module.loadfile_vulkan").set_body_typed(VulkanModuleLoadFile);
+TVM_REGISTER_GLOBAL("runtime.module.loadfile_vulkan").set_body_typed(VulkanModuleLoadFile);
-TVM_REGISTER_GLOBAL("module.loadbinary_vulkan").set_body_typed(VulkanModuleLoadBinary);
+TVM_REGISTER_GLOBAL("runtime.module.loadbinary_vulkan").set_body_typed(VulkanModuleLoadBinary);
TVM_REGISTER_GLOBAL("device_api.vulkan").set_body([](TVMArgs args, TVMRetValue* rv) {
DeviceAPI* ptr = VulkanDeviceAPI::Global().get();
return (*codegen_f)(blob_byte_array, system_lib, target_triple);
}
+TVM_REGISTER_GLOBAL("codegen._Build")
+.set_body([](TVMArgs args, TVMRetValue *ret) {
+ if (args[0].IsObjectRef<tir::LoweredFunc>()) {
+ *ret = Build({args[0]}, args[1]);
+ } else {
+ *ret = Build(args[0], args[1]);
+ }
+ });
+
+// Export two auxiliary functions to the runtime namespace.
+TVM_REGISTER_GLOBAL("runtime.ModulePackImportsToC")
+.set_body_typed(PackImportsToC);
+
+TVM_REGISTER_GLOBAL("runtime.ModulePackImportsToLLVM")
+.set_body_typed(PackImportsToLLVM);
+
} // namespace codegen
} // namespace tvm
*rv = major;
});
-TVM_REGISTER_GLOBAL("module.loadfile_ll")
+TVM_REGISTER_GLOBAL("runtime.module.loadfile_ll")
.set_body([](TVMArgs args, TVMRetValue* rv) {
auto n = make_object<LLVMModuleNode>();
n->LoadIR(args[0]);
return runtime::Module(n);
}
-TVM_REGISTER_GLOBAL("module.source_module_create")
+TVM_REGISTER_GLOBAL("runtime.SourceModuleCreate")
.set_body_typed(SourceModuleCreate);
-TVM_REGISTER_GLOBAL("module.csource_module_create")
+TVM_REGISTER_GLOBAL("runtime.CSourceModuleCreate")
.set_body_typed(CSourceModuleCreate);
} // namespace codegen
} // namespace tvm
* \file expr_operator.cc
*/
+#include <tvm/runtime/registry.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/op.h>
#include <cmath>
return tir::CallNode::make(x.dtype(), "trunc", {x}, tir::CallNode::PureIntrinsic);
}
+
+// expose basic functions to the node namespace
+TVM_REGISTER_GLOBAL("node._const")
+.set_body([](TVMArgs args, TVMRetValue* ret) {
+ if (args[0].type_code() == kDLInt) {
+ *ret = tir::make_const(args[1], args[0].operator int64_t());
+ } else if (args[0].type_code() == kDLFloat) {
+ *ret = tir::make_const(args[1], args[0].operator double());
+ } else {
+ LOG(FATAL) << "only accept int or float";
+ }
+ });
+
+TVM_REGISTER_GLOBAL("node.LargeUIntImm")
+.set_body_typed(LargeUIntImm);
+
+TVM_REGISTER_GLOBAL("node.String")
+.set_body_typed(tir::StringImmNode::make);
+
} // namespace tvm
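A minimal sketch of the "node._const" helper registered above: the first argument's type code selects an integer or floating-point immediate, the second argument is the dtype string (illustrative only):

import tvm

make_const = tvm.get_global_func("node._const")
c_int = make_const(1, "int32")      # kDLInt type code -> integer immediate
c_flt = make_const(1.5, "float32")  # kDLFloat type code -> float immediate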
using namespace tvm;
using namespace tvm::te;
- const runtime::PackedFunc* pf = runtime::Registry::Get("module._Enabled");
- bool enabled = (*pf)("cuda");
+ bool enabled = tvm::runtime::RuntimeEnabled("cuda");
if (!enabled) {
LOG(INFO) << "Skip heterogeneous test because cuda is not enabled."
<< "\n";
pf1(ObjectRef(x), NDArray());
// testcases for modules
- auto* pf = tvm::runtime::Registry::Get("module.source_module_create");
+ auto* pf = tvm::runtime::Registry::Get("runtime.SourceModuleCreate");
CHECK(pf != nullptr);
Module m = (*pf)("", "xyz");
rv = m;
return np.dot(a, b) + bb
def verify(target="llvm"):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
print("skip because %s is not enabled..." % target)
return
if not tvm.get_global_func("tvm.contrib.cblas.matmul", True):
return topi.testing.batch_matmul(a, b)
def verify(target="llvm"):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
print("skip because %s is not enabled..." % target)
return
if not tvm.get_global_func("tvm.contrib.cblas.matmul", True):
s = tvm.create_schedule(C.op)
def verify(target="cuda"):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
print("skip because %s is not enabled..." % target)
return
if not tvm.get_global_func("tvm.contrib.cublas.matmul", True):
s = tvm.create_schedule(C.op)
def verify(target="cuda"):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
print("skip because %s is not enabled..." % target)
return
if not tvm.get_global_func("tvm.contrib.cublaslt.matmul", True):
s = tvm.create_schedule(C.op)
def verify(target="cuda"):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
print("skip because %s is not enabled..." % target)
return
if not tvm.get_global_func("tvm.contrib.cublas.matmul", True):
height = 32
weight = 32
- if not tvm.module.enabled("cuda"):
+ if not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled...")
return
if not tvm.get_global_func("tvm.contrib.cudnn.conv.output_shape", True):
height = 32
weight = 32
- if not tvm.module.enabled("cuda"):
+ if not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled...")
return
if not tvm.get_global_func("tvm.contrib.cudnn.conv.output_shape", True):
print("Peak {} Gops/s \n".format(peak))
def verify(target="llvm -mcpu=skylake-avx512"):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
print("skip because %s is not enabled..." % target)
return
# (ignoring processor)" error with the following setting. After LLVM 8.0 is enabled in the
# test, we should use cascadelake setting.
def verify(target="llvm -mcpu=cascadelake"):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
print("skip because %s is not enabled..." % target)
return
dilation_w = 1
xshape = [1, in_channel, 128, 128]
- if not tvm.module.enabled("rocm"):
+ if not tvm.runtime.enabled("rocm"):
print("skip because rocm is not enabled...")
return
if not tvm.get_global_func("tvm.contrib.miopen.conv2d.setup", True):
from tvm.contrib import mps
def test_matmul():
- if not tvm.module.enabled("metal"):
+ if not tvm.runtime.enabled("metal"):
print("skip because %s is not enabled..." % "metal")
return
n = 1024
verify(A, B, D, s)
def test_conv2d():
- if not tvm.module.enabled("metal"):
+ if not tvm.runtime.enabled("metal"):
print("skip because %s is not enabled..." % "metal")
return
n = 1
s = tvm.create_schedule(D.op)
def verify(target="llvm"):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
pytest.skip("%s is not enabled..." % target)
if not tvm.get_global_func("tvm.contrib.nnpack.fully_connected_inference", True):
pytest.skip("extern function is not available")
def verify(target="llvm",
algorithm=nnpack.ConvolutionAlgorithm.AUTO,
with_bias=True):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
pytest.skip("%s is not enabled..." % target)
if not tvm.get_global_func("tvm.contrib.nnpack.fully_connected_inference", True):
pytest.skip("extern function is not available")
def verify(target="llvm",
algorithm=nnpack.ConvolutionAlgorithm.AUTO,
with_bias=True):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
pytest.skip("%s is not enabled..." % target)
if not tvm.get_global_func("tvm.contrib.nnpack.fully_connected_inference", True):
pytest.skip("extern function is not available")
s = tvm.create_schedule(A.op)
def verify(target="llvm"):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
print("skip because %s is not enabled..." % target)
return
if not tvm.get_global_func("tvm.contrib.random.randint", True):
s = tvm.create_schedule(A.op)
def verify(target="llvm"):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
print("skip because %s is not enabled..." % target)
return
if not tvm.get_global_func("tvm.contrib.random.uniform", True):
s = tvm.create_schedule(A.op)
def verify(target="llvm"):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
print("skip because %s is not enabled..." % target)
return
if not tvm.get_global_func("tvm.contrib.random.normal", True):
s = tvm.create_schedule(C.op)
def verify(target="rocm"):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
print("skip because %s is not enabled..." % target)
return
if not tvm.get_global_func("tvm.contrib.rocblas.matmul", True):
web_port = 8888
prox = proxy.Proxy("localhost", web_port=web_port)
def check():
- if not tvm.module.enabled("rpc"):
+ if not tvm.runtime.enabled("rpc"):
return
@tvm.register_func("rpc.test2.addone")
def addone(x):
fapi = lower(s, [A, B, C])
def verify(target):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
print("Target %s is not enabled" % target)
return
f = tvm.codegen.build_module(fapi, target)
# one line to build the function.
def check_device(device, host="stackvm"):
- if not tvm.module.enabled(host):
+ if not tvm.runtime.enabled(host):
return
ctx = tvm.context(device, 0)
if not ctx.exist:
s[C].bind(tx, tvm.thread_axis("threadIdx.x"))
# one line to build the function.
def check_device(device, host="stackvm"):
- if not tvm.module.enabled(host):
+ if not tvm.runtime.enabled(host):
return
ctx = tvm.context(device, 0)
if not ctx.exist:
# create iter var and assign them tags.
bx, tx = s[B].split(B.op.axis[0], factor=32)
# one line to build the function.
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
flog = tvm.build(s, [A, B],
# one line to build the function.
def check_device(device, host="llvm"):
- if not tvm.module.enabled(host):
+ if not tvm.runtime.enabled(host):
return
ctx = tvm.context(device, 0)
if not ctx.exist:
# one line to build the function.
def check_device(device, host="llvm"):
- if not tvm.module.enabled(host):
+ if not tvm.runtime.enabled(host):
return
ctx = tvm.context(device, 0)
if not ctx.exist:
# one line to build the function.
def check_device(device, host="llvm"):
ctx = tvm.context(device, 0)
- if not tvm.module.enabled(host):
+ if not tvm.runtime.enabled(host):
return
if not ctx.exist:
print("skip because %s is not enabled.." % device)
s[BF].parallel(BF.op.axis[0])
# one line to build the function.
def check_target(target="llvm"):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
return
ctx = tvm.cpu(0)
fapi = tvm.lower(s, args=[A, B])
s[BF].parallel(BF.op.axis[0])
# one line to build the function.
def check_target(target="llvm"):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
return
ctx = tvm.cpu(0)
fapi = tvm.lower(s, args=[A, B])
def check_target():
device = 'cpu'
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("skip because %s is not enabled.." % device)
return
ctx = tvm.context(device, 0)
def test_fp16_build():
dtype = "float16"
- if not tvm.module.enabled("cuda") or not tvm.gpu(0).exist:
+ if not tvm.runtime.enabled("cuda") or not tvm.gpu(0).exist:
print("skip because cuda is not enabled.")
return
def test_fp16_conversion():
def check_conversion(tgt, ctx):
- if not tvm.module.enabled(tgt):
+ if not tvm.runtime.enabled(tgt):
print("skip because {} is not enabled.".format(tgt))
return
elif tgt == "cuda" and ctx.exist and not have_fp16(ctx.compute_version):
lib_name = 'lib.so'
lib_path = tmp_path.relpath(lib_name)
lib.export_library(lib_path, fcompile=False, **kwargs)
- lib = tvm.module.load(lib_path)
+ lib = tvm.runtime.load_module(lib_path)
return lib
import numpy as np
import tvm
+import tvm.runtime._ffi_api
from tvm import relay
-from tvm import module as _tvm_module
from tvm.contrib import util
tmp_path = util.tempdir()
TVM_DLL_EXPORT_TYPED_FUNC(json_rt_0, ccompiler_wrapper_0_);
'''
- csource_module = _tvm_module.csource_module_create(code, "cc")
+ csource_module = tvm.runtime._ffi_api.CSourceModuleCreate(code, "cc")
return csource_module
extern "C" void json_1_(float* json_input4, float* json_input5,
float* json_input6, float* json_input7, float* out) {
-
+
std::string graph =
"add_2d,10,10\n"
"sub_2d,10,10\n"
extern "C" void json_0_(float* json_input0, float* json_input1,
float* json_input2, float* json_input3, float* out) {
-
+
std::string graph =
"add_2d,10,10\n"
"sub_2d,10,10\n"
'''
gen_json_engine()
- csource_module = _tvm_module.csource_module_create(code, "cc")
+ csource_module = tvm.runtime._ffi_api.CSourceModuleCreate(code, "cc")
return csource_module
lib_path = tmp_path.relpath(lib_name)
csource_module.export_library(lib_path, fcompile=False, **kwargs)
# load module for execution.
- lib = tvm.module.load(lib_path)
+ lib = tvm.runtime.load_module(lib_path)
mod = tvm.contrib.graph_runtime.create(graph_json, lib, tvm.cpu(0))
x_data = np.random.rand(10, 10).astype('float32')
lib = get_synthetic_lib()
- ext_lib = tvm.module.load(subgraph_path, "examplejson")
+ ext_lib = tvm.runtime.load_module(subgraph_path, "examplejson")
lib.import_module(ext_lib)
lib_name = 'external.so'
lib_path = tmp_path.relpath(lib_name)
lib.export_library(lib_path)
# load module for execution.
- lib = tvm.module.load(lib_path)
+ lib = tvm.runtime.load_module(lib_path)
mod = tvm.contrib.graph_runtime.create(graph_json, lib, tvm.cpu(0))
x_data = np.random.rand(10, 10).astype('float32')
func = relay.Function([cls_prob, bbox_pred, im_info], z)
func = run_infer_type(func)
for target in ['llvm', 'cuda']:
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
print("Skip test because %s is not enabled." % target)
continue
ctx = tvm.context(target, 0)
def test_check_run():
for dev, tgt in [("opencl", "opencl"), ("cuda", "cuda"),
("opencl", str(tvm.target.intel_graphics()))]:
- if not tvm.module.enabled(dev):
+ if not tvm.runtime.enabled(dev):
print("Skip test because %s is not enabled." % dev)
continue
run_fusible_network(dev, tgt)
def test_tuple_get_item():
dev = "cuda"
- if not tvm.module.enabled(dev):
+ if not tvm.runtime.enabled(dev):
print("Skip test because %s is not enabled." % dev)
return
lib_name = 'lib.so'
lib_path = tmp_path.relpath(lib_name)
lib.export_library(lib_path, fcompile=False, **kwargs)
- lib = tvm.module.load(lib_path)
+ lib = tvm.runtime.load_module(lib_path)
return lib
code, lib = exe.save()
assert isinstance(code, bytearray)
- assert isinstance(lib, tvm.module.Module)
+ assert isinstance(lib, tvm.runtime.Module)
def test_save_load():
with open(tmp.relpath("code.ro"), "wb") as fo:
fo.write(code)
- loaded_lib = tvm.module.load(path_lib)
+ loaded_lib = tvm.runtime.load_module(path_lib)
loaded_code = bytearray(open(tmp.relpath("code.ro"), "rb").read())
# deserialize.
def test_resnet18():
for device in ["llvm", "cuda"]:
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("skip because %s is not enabled..." % device)
return
with open(temp.relpath("deploy_param.params"), "wb") as fo:
fo.write(relay.save_param_dict(graph_params))
- loaded_lib = tvm.module.load(path_lib)
+ loaded_lib = tvm.runtime.load_module(path_lib)
loaded_json = open(temp.relpath("deploy_graph.json")).read()
loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
data = np.random.uniform(-1, 1, size=(1, 3, 224, 224)).astype("float32")
def test_system_lib():
ctx = tvm.gpu(0)
for device in ["llvm", "cuda"]:
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("skip because %s is not enabled..." % device)
return
nn = 12
# Load dll, will trigger system library registration
dll = ctypes.CDLL(path_lib)
# Load the system wide library
- m = tvm.module.system_lib()
+ m = tvm.runtime.system_lib()
a = tvm.nd.array(np.random.uniform(size=nn).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(nn, dtype=A.dtype), ctx)
m['add'](a, b)
def check_llvm():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
s = tvm.create_schedule(D.op)
xo, xi = s[C].split(C.op.axis[0], factor=4)
temp = util.tempdir()
path_dso = temp.relpath("temp.so")
mhost.export_library(path_dso)
- m = tvm.module.load(path_dso)
+ m = tvm.runtime.load_module(path_dso)
fadd = m['fadd']
ctx = tvm.cpu(0)
# launch the kernel.
temp = util.tempdir()
path_dso = temp.relpath("temp.so")
mhost.export_library(path_dso)
- m = tvm.module.load(path_dso)
+ m = tvm.runtime.load_module(path_dso)
fadd = m["fadd_pipeline"]
ctx = tvm.cpu(0)
# launch the kernel.
temp = util.tempdir()
path_dso = temp.relpath("temp.so")
mhost.export_library(path_dso)
- m = tvm.module.load(path_dso)
+ m = tvm.runtime.load_module(path_dso)
fadd = m['reinterpret']
ctx = tvm.cpu(0)
n = nn
assert struct.unpack(endian + 'h', arr[0x12:0x14])[0] == e_machine
def build_i386():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
print("Skip because llvm is not enabled..")
return
temp = util.tempdir()
def build_arm():
target = "llvm -target=armv7-none-linux-gnueabihf"
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
print("Skip because %s is not enabled.." % target)
return
temp = util.tempdir()
def test_cuda_vectorize_add():
num_thread = 8
def check_cuda(dtype, n, lanes):
- if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"):
+ if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
if dtype == "float16":
def test_cuda_multiply_add():
num_thread = 8
def check_cuda(dtype, n, lanes):
- if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"):
+ if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
if dtype == "int8" and not have_int8(tvm.gpu(0).compute_version):
def test_cuda_vectorize_load():
num_thread = 8
def check_cuda(dtype, n, lanes):
- if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"):
+ if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
ctx = tvm.gpu(0)
def test_cuda_make_int8x4():
def check_cuda(n, value):
- if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"):
+ if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
lanes = 4
# Only need to test compiling here
fun(a, c)
- if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"):
+ if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
def test_cuda_shuffle():
- if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"):
+ if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
def test_cuda_reducition_binding():
- if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"):
+ if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
fcuda = tvm.build(s, [A, B], "cuda")
def test_rfactor_predicates():
- if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"):
+ if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
fcuda = tvm.build(s, [A, B], "cuda")
-@unittest.skipIf(not tvm.gpu(0).exist or not tvm.module.enabled("cuda"), "skip because cuda is not enabled..")
+@unittest.skipIf(not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"), "skip because cuda is not enabled..")
def test_cuda_const_float_to_half():
# This import is required to use nvcc to perform code gen;
# otherwise it is found that the code gen is done by nvrtc.
ctx = tvm.context(device, 0)
if not ctx.exist:
return
- if not tvm.module.enabled(host):
+ if not tvm.runtime.enabled(host):
return
mhost = tvm.codegen.build_module(fsplits[0], host)
mdev = tvm.codegen.build_module(fsplits[1:], device)
ctx = tvm.context(device, 0)
if not ctx.exist:
return
- if not tvm.module.enabled(host):
+ if not tvm.runtime.enabled(host):
return
if device == "cuda":
fmt = "ptx"
temp = util.tempdir()
mpath = temp.relpath("test.%s" % fmt)
mdev.save(mpath)
- mdev2 = tvm.module.load(mpath)
+ mdev2 = tvm.runtime.load_module(mpath)
mhost.import_module(mdev2)
f = mhost.entry_func
# launch the kernel.
print(tvm.lower(s_gpu, [A, C_gpu], simple_mode=True))
def check_target(target):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
return
s = s_gpu if target in ['opencl', 'cuda'] else s_cpu
C = C_gpu if target in ['opencl', 'cuda'] else C_cpu
def check_target(target):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
return
# build and invoke the kernel.
f = tvm.build(s, [A, C], target)
s = tvm.create_schedule(C.op)
def check_target(target):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
return
# build and invoke the kernel.
f = tvm.build(s, [A, C], target)
tvm.call_pure_extern("float32", "my_add", A(*i), 1.0),
name='B')
def check_llvm(use_file):
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
if not clang.find_clang(required=False):
print("skip because clang is not available")
s = tvm.create_schedule(A.op)
def check_llvm():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
f = tvm.build(s, [A], "llvm")
ctx = tvm.cpu(0)
s[C].vectorize(xi)
def check_llvm():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
# Specifically allow offset to test codepath when offset is available
Ab = tvm.decl_buffer(
s[C].pragma(xi, "parallel_stride_pattern")
def check_llvm():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
# BUILD and invoke the kernel.
f = tvm.build(s, [A, C], "llvm")
def test_llvm_flip_pipeline():
def check_llvm(nn, base):
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
n = tvm.convert(nn)
A = tvm.placeholder((n + base), name='A')
def test_llvm_vadd_pipeline():
def check_llvm(n, lanes):
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
A = tvm.placeholder((n,), name='A', dtype="float32x%d" % lanes)
B = tvm.compute((n,), lambda i: A[i], name='B')
def test_llvm_madd_pipeline():
def check_llvm(nn, base, stride):
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
n = tvm.convert(nn)
A = tvm.placeholder((n + base, stride), name='A')
s = tvm.create_schedule(C.op)
def check_llvm():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
# build and invoke the kernel.
f = tvm.build(s, [A, C], "llvm")
s[C].parallel(xo)
s[C].vectorize(xi)
def check_llvm():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
# build two functions
f2 = tvm.lower(s, [A, B, C], name="fadd1")
def test_llvm_condition():
def check_llvm(n, offset):
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
A = tvm.placeholder((n, ), name='A')
C = tvm.compute((n,), lambda i: tvm.if_then_else(i >= offset, A[i], 0.0), name='C')
def test_llvm_bool():
def check_llvm(n):
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
A = tvm.placeholder((n, ), name='A', dtype="int32")
C = tvm.compute((n,), lambda i: A[i].equal(1).astype("float"), name='C')
def test_rank_zero():
def check_llvm(n):
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
A = tvm.placeholder((n, ), name='A')
scale = tvm.placeholder((), name='scale')
def test_rank_zero_bound_checkers():
def check_llvm(n):
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
with tvm.build_config(instrument_bound_checkers=True):
A = tvm.placeholder((n, ), name='A')
s[C].parallel(xo)
s[C].vectorize(xi)
def check_llvm_object():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
if tvm.codegen.llvm_version_major() < 5:
return
assert re.search(r"""DW_AT_name.*fadd2""", str(output))
def check_llvm_ir():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
if tvm.codegen.llvm_version_major() < 5:
return
# Only need to test compiling here
fun(a, c)
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
print("skip because opencl is not enabled..")
return
# Only need to test compiling here
fun(a, c)
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
print("skip because opencl is not enabled..")
return
bx = tvm.thread_axis("blockIdx.x")
by = tvm.thread_axis("blockIdx.y")
-@unittest.skipIf(not tvm.rocm(0).exist or not tvm.module.enabled("rocm"), "skip because rocm is not enabled..")
+@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..")
def test_rocm_cross_thread_reduction():
# based on the reduction tutorial
n = tvm.size_var("n")
ko, ki = s[B].split(B.op.reduce_axis[0], factor=16)
BF = s.rfactor(B, ki)
xo, xi = s[B].split(s[B].op.axis[0], factor=32)
- s[B].bind(xo, bx)
+ s[B].bind(xo, bx)
s[B].bind(xi, ty)
s[B].bind(s[B].op.reduce_axis[0], tx)
s[BF].compute_at(s[B], s[B].op.reduce_axis[0])
tvm.testing.assert_allclose(
b.asnumpy(), np.sum(a.asnumpy(), axis=1), rtol=1e-4)
-
-@unittest.skipIf(not tvm.rocm(0).exist or not tvm.module.enabled("rocm"), "skip because rocm is not enabled..")
+
+@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..")
def test_rocm_inf_nan():
def check_inf_nan(ctx, n, value, dtype):
A = tvm.placeholder((n,), name='A', dtype=dtype)
check_inf_nan(ctx, 1, float('nan'), 'float32')
check_inf_nan(ctx, 1, float('nan'), 'float64')
-@unittest.skipIf(not tvm.rocm(0).exist or not tvm.module.enabled("rocm"), "skip because rocm is not enabled..")
+@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..")
def test_rocm_reducition_binding():
k = tvm.reduce_axis((0, 32), 'k')
A = tvm.placeholder((96, 32), name='A')
mo, _ = s[B].split(B.op.axis[0], 32)
s[B].bind(mo, bx)
-@unittest.skipIf(not tvm.rocm(0).exist or not tvm.module.enabled("rocm"), "skip because rocm is not enabled..")
+@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..")
def test_rocm_copy():
def check_rocm(dtype, n):
peturb = np.random.uniform(low=0.5, high=1.5)
check_rocm(dtype, int(peturb * (2 ** logN)))
-@unittest.skipIf(not tvm.rocm(0).exist or not tvm.module.enabled("rocm"), "skip because rocm is not enabled..")
+@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..")
def test_rocm_vectorize_add():
num_thread = 8
def run_jit(fapi, check):
for target in ["llvm", "stackvm"]:
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
continue
f = tvm.codegen.build_module(fapi, target)
s = f.get_source()
def test_vector_comparison():
- if not tvm.module.enabled("vulkan"):
+ if not tvm.runtime.enabled("vulkan"):
print("Skipping due to no Vulkan module")
return
def test_vulkan_copy():
def check_vulkan(dtype, n):
- if not tvm.vulkan(0).exist or not tvm.module.enabled("vulkan"):
+ if not tvm.vulkan(0).exist or not tvm.runtime.enabled("vulkan"):
print("skip because vulkan is not enabled..")
return
A = tvm.placeholder((n,), name='A', dtype=dtype)
num_thread = 8
def check_vulkan(dtype, n, lanes):
- if not tvm.vulkan(0).exist or not tvm.module.enabled("vulkan"):
+ if not tvm.vulkan(0).exist or not tvm.runtime.enabled("vulkan"):
print("skip because vulkan is not enabled..")
return
A = tvm.placeholder((n,), name='A', dtype="%sx%d" % (dtype, lanes))
def run_stress():
def worker():
- if not tvm.vulkan(0).exist or not tvm.module.enabled("vulkan"):
+ if not tvm.vulkan(0).exist or not tvm.runtime.enabled("vulkan"):
print("skip because vulkan is not enabled..")
return
A = tvm.placeholder((n,), name='A', dtype="float32")
name="vector_add", dtype=dtype)
s = tvm.create_schedule(C.op)
def check_target(target):
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
return
# build and invoke the kernel.
fadd = tvm.build(s, [A, B, C], target)
stmt = tvm.schedule.ScheduleOps(s, bounds)
def check_target(target):
n = 1024
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
return
# build and invoke the kernel.
fadd = tvm.build(s, [A, B, C], target)
s = tvm.create_schedule(C.op)
def check():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
fadd = tvm.build(s, [A, B, C], target='llvm', name='bcast_add', binds={A:Ab, B:Bb})
ctx = tvm.cpu(0)
s = tvm.create_schedule(C.op)
def check_stride():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
fadd = tvm.build(s, [A, B, C, o1, x], target='llvm', name='bcast_add',
binds={A:Ab, B:Bb, C:Cc})
tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy())
def check_no_stride():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
fadd = tvm.build(s, [A, B, C, o1, x], target='llvm', name='bcast_add',
binds={A: Ab, B: Bb, C: Cc})
tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy())
def check_auto_bind():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
# Let build bind buffers
fadd = tvm.build(s, [A, B, C, o1, x], target='llvm', name='bcast_add')
@pytest.mark.xfail
def test_out_of_bounds_tensors_with_zero_shape_op_with_not_zero_shape_llvm():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
n = 64
A = tvm.placeholder((n, ), name='A')
C = tvm.compute((n,), make_binds)
s = tvm.create_schedule([C.op])
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
f = tvm.build(s, [A, B, C], "llvm")
np.testing.assert_allclose(c_np, c.asnumpy(), rtol=1e-3)
def test_tensor_core_matmul():
- if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"):
+ if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
if not nvcc.have_tensorcore(tvm.gpu(0).compute_version):
tensor_core_matmul(32) #test with warp_tile 32x8x16
def test_tensor_core_batch_matmul():
- if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"):
+ if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
if not nvcc.have_tensorcore(tvm.gpu(0).compute_version):
graph = json.dumps(graph)
def check_verify():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
print("Skip because llvm is not enabled")
return
mlib = tvm.build(s, [A, B], "llvm", name="myadd")
np.testing.assert_equal(out.asnumpy(), a + 1)
def check_remote():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
print("Skip because llvm is not enabled")
return
mlib = tvm.build(s, [A, B], "llvm", name="myadd")
params = {'x': x_in}
graph, lib, params = relay.build(func, target="llvm", params=params)
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
print("Skip because llvm is not enabled")
return
mod_shared = graph_runtime.create(graph, lib, tvm.cpu(0))
graph = json.dumps(graph)
def check_verify():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
print("Skip because llvm is not enabled")
return
mlib = tvm.build(s, [A, B], "llvm", name="myadd")
assert(not os.path.exists(directory))
def check_remote():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
print("Skip because llvm is not enabled")
return
mlib = tvm.build(s, [A, B], "llvm", name="myadd")
host = "cpu"
target_host = "llvm"
host_ctx = tvm.context(host)
- if not tvm.module.enabled(target_host):
+ if not tvm.runtime.enabled(target_host):
print("Skip test because llvm is not enabled.")
return
def check_device(device, target_device):
- if not tvm.module.enabled(target_device):
+ if not tvm.runtime.enabled(target_device):
print("Skip test because {} is not enabled.".format(target_device))
return
host = "cpu"
target_host = "llvm"
host_ctx = tvm.context(host)
- if not tvm.module.enabled(target_host):
+ if not tvm.runtime.enabled(target_host):
print("Skip test because llvm is not enabled.")
return
def check_device(device, target_device):
- if not tvm.module.enabled(target_device):
+ if not tvm.runtime.enabled(target_device):
print("Skip test because {} is not enabled.".format(target_device))
return
mhost.export_library(path_lib)
with open(temp.relpath("deploy.json"), "w") as out_file:
out_file.write(graph)
- loaded_lib = tvm.module.load(path_lib)
+ loaded_lib = tvm.runtime.load_module(path_lib)
loaded_graph = open(temp.relpath("deploy.json")).read()
mod = graph_runtime.create(loaded_graph, loaded_lib, ctx)
mod.set_input(**params)
Return
------
- mod : tvm.module.Module
+ mod : tvm.runtime.Module
graph runtime module for the target device
"""
with tvm.build_config(disable_vectorize=True):
def test_alloc():
"""Test tensor allocation on the device."""
- if not tvm.module.enabled("micro_dev"):
+ if not tvm.runtime.enabled("micro_dev"):
return
shape = (1024,)
dtype = "float32"
def test_add():
"""Test a module which performs addition."""
- if not tvm.module.enabled("micro_dev"):
+ if not tvm.runtime.enabled("micro_dev"):
return
shape = (1024,)
dtype = "float32"
def test_workspace_add():
"""Test a module which uses a workspace to compute an intermediate value."""
- if not tvm.module.enabled("micro_dev"):
+ if not tvm.runtime.enabled("micro_dev"):
return
shape = (1024,)
dtype = "float32"
def test_graph_runtime():
"""Test a program which uses the graph runtime."""
- if not tvm.module.enabled("micro_dev"):
+ if not tvm.runtime.enabled("micro_dev"):
return
shape = (1024,)
dtype = "float32"
def test_multiple_modules():
"""Test loading multiple modules on the device simultaneously."""
- if not tvm.module.enabled("micro_dev"):
+ if not tvm.runtime.enabled("micro_dev"):
return
shape = (1024,)
dtype = "float32"
def test_interleave_sessions():
"""Test closing and reopening sessions."""
- if not tvm.module.enabled("micro_dev"):
+ if not tvm.runtime.enabled("micro_dev"):
return
shape = (1024,)
dtype = "float32"
def test_nested_sessions():
"""Test entering and exiting nested session contexts."""
- if not tvm.module.enabled("micro_dev"):
+ if not tvm.runtime.enabled("micro_dev"):
return
shape = (1024,)
dtype = "float32"
def test_inactive_session_use():
"""Test the use of objects allocated in a session that is no longer active."""
- if not tvm.module.enabled("micro_dev"):
+ if not tvm.runtime.enabled("micro_dev"):
return
shape = (1024,)
dtype = "float32"
Engine engine;
}
'''
+ import tvm.runtime._ffi_api
gen_engine_header()
- csource_module = tvm.module.csource_module_create(code, "cc")
+ csource_module = tvm.runtime._ffi_api.CSourceModuleCreate(code, "cc")
return csource_module
def test_mod_export():
def verify_gpu_mod_export(obj_format):
for device in ["llvm", "cuda"]:
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("skip because %s is not enabled..." % device)
return
path_lib = temp.relpath(file_name)
resnet18_gpu_lib.imported_modules[0].import_module(resnet50_cpu_lib)
resnet18_gpu_lib.export_library(path_lib)
- loaded_lib = tvm.module.load(path_lib)
+ loaded_lib = tvm.runtime.load_module(path_lib)
assert loaded_lib.type_key == "library"
assert loaded_lib.imported_modules[0].type_key == "cuda"
assert loaded_lib.imported_modules[0].imported_modules[0].type_key == "library"
def verify_multi_dso_mod_export(obj_format):
for device in ["llvm"]:
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("skip because %s is not enabled..." % device)
return
path_lib = temp.relpath(file_name)
resnet18_cpu_lib.import_module(f)
resnet18_cpu_lib.export_library(path_lib)
- loaded_lib = tvm.module.load(path_lib)
+ loaded_lib = tvm.runtime.load_module(path_lib)
assert loaded_lib.type_key == "library"
assert loaded_lib.imported_modules[0].type_key == "library"
def verify_json_import_dso(obj_format):
for device in ["llvm"]:
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("skip because %s is not enabled..." % device)
return
s = tvm.create_schedule(B.op)
f = tvm.build(s, [A, B], "llvm", name="myadd")
try:
- ext_lib = tvm.module.load(subgraph_path, "examplejson")
+ ext_lib = tvm.runtime.load_module(subgraph_path, "examplejson")
except:
print("skip because Loader of examplejson is not presented")
return
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
ext_lib.export_library(path_lib)
- lib = tvm.module.load(path_lib)
+ lib = tvm.runtime.load_module(path_lib)
assert lib.type_key == "examplejson"
assert lib.imported_modules[0].type_key == "library"
print("Skip test because gcc is not available.")
for device in ["llvm"]:
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("skip because %s is not enabled..." % device)
return
resnet18_cpu_lib.import_module(engine_module)
kwargs = {"options": ["-O2", "-std=c++11", "-I" + header_file_dir_path.relpath("")]}
resnet18_cpu_lib.export_library(path_lib, fcompile=False, **kwargs)
- loaded_lib = tvm.module.load(path_lib)
+ loaded_lib = tvm.runtime.load_module(path_lib)
assert loaded_lib.type_key == "library"
assert loaded_lib.imported_modules[0].type_key == "library"
assert loaded_lib.imported_modules[1].type_key == "library"
import numpy as np
path_dso = sys.argv[1]
dtype = sys.argv[2]
-ff = tvm.module.load(path_dso)
+ff = tvm.runtime.load_module(path_dso)
a = tvm.nd.array(np.zeros(10, dtype=dtype))
ff(a)
np.testing.assert_equal(a.asnumpy(), np.arange(a.shape[0]))
"""
def test_dso_module_load():
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
dtype = 'int64'
temp = util.tempdir()
save_object([path_obj, path_ll, path_bc])
cc.create_shared(path_dso, [path_obj])
- f1 = tvm.module.load(path_dso)
- f2 = tvm.module.load(path_ll)
+ f1 = tvm.runtime.load_module(path_dso)
+ f2 = tvm.runtime.load_module(path_ll)
a = tvm.nd.array(np.zeros(10, dtype=dtype))
f1(a)
np.testing.assert_equal(a.asnumpy(), np.arange(a.shape[0]))
# test cross compiler function
f.export_library(path_dso, cc.cross_compiler("g++"))
- f1 = tvm.module.load(path_dso)
+ f1 = tvm.runtime.load_module(path_dso)
a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), ctx)
f1(a, b)
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
if sys.platform != "win32":
- f2 = tvm.module.system_lib()
+ f2 = tvm.runtime.system_lib()
f2[name](a, b)
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
f = tvm.build(s, [A, B], device, "stackvm", name=name)
path_dso = temp.relpath("dev_lib.stackvm")
f.export_library(path_dso)
- f1 = tvm.module.load(path_dso)
+ f1 = tvm.runtime.load_module(path_dso)
a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), ctx)
f(a, b)
def check_llvm():
ctx = tvm.cpu(0)
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
print("Skip because llvm is not enabled" )
return
temp = util.tempdir()
fadd2.save(path2)
# create shared library with multiple functions
cc.create_shared(path_dso, [path1, path2])
- m = tvm.module.load(path_dso)
+ m = tvm.runtime.load_module(path_dso)
fadd1 = m['myadd1']
fadd2 = m['myadd2']
a = tvm.nd.array(np.random.uniform(size=nn).astype(A.dtype), ctx)
def check_system_lib():
ctx = tvm.cpu(0)
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
print("Skip because llvm is not enabled" )
return
temp = util.tempdir()
# Load dll, will trigger system library registration
dll = ctypes.CDLL(path_dso)
# Load the system wide library
- mm = tvm.module.system_lib()
+ mm = tvm.runtime.system_lib()
a = tvm.nd.array(np.random.uniform(size=nn).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(nn, dtype=A.dtype), ctx)
mm['myadd1'](a, b)
return 10
# get it out from global function table
f = tvm.get_global_func("my_packed_func")
- assert isinstance(f, tvm.Function)
+ assert isinstance(f, tvm.runtime.PackedFunc)
y = f(*targs)
assert y == 10
# get it out from global function table
f = tvm.get_global_func("my_callback_with_node")
- assert isinstance(f, tvm.Function)
+ assert isinstance(f, tvm.runtime.PackedFunc)
y = f(x, f2)
assert(y.value == 10)
assert(tuple(args) == targs)
f = tvm.convert(myfunc)
- assert isinstance(f, tvm.Function)
+ assert isinstance(f, tvm.runtime.PackedFunc)
def test_byte_array():
s = "hello"
def test_rpc_simple():
- if not tvm.module.enabled("rpc"):
+ if not tvm.runtime.enabled("rpc"):
return
@tvm.register_func("rpc.test.addone")
def addone(x):
assert f2("abc", 11) == "abc:11"
def test_rpc_array():
- if not tvm.module.enabled("rpc"):
+ if not tvm.runtime.enabled("rpc"):
return
x = np.random.randint(0, 10, size=(3, 4))
@tvm.register_func("rpc.test.remote_array_func")
fremote(r_cpu)
def test_rpc_file_exchange():
- if not tvm.module.enabled("rpc"):
+ if not tvm.runtime.enabled("rpc"):
return
server = rpc.Server("localhost")
remote = rpc.connect(server.host, server.port)
assert(rev == blob)
def test_rpc_remote_module():
- if not tvm.module.enabled("rpc"):
+ if not tvm.runtime.enabled("rpc"):
return
server = rpc.Server("localhost")
client = rpc.connect(server.host, server.port)
s = tvm.create_schedule(B.op)
def check_remote(remote):
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
print("Skip because llvm is not enabled")
return
temp = util.tempdir()
runtime initializes. We leave it as an example
on how to do rpc when we want to do linking on remote.
"""
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
print("Skip because llvm is not enabled")
return
- if not tvm.module.enabled("opencl"):
+ if not tvm.runtime.enabled("opencl"):
print("Skip because opencl is not enabled")
return
temp = util.tempdir()
def test_tensor_core_batch_matmal():
- if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"):
+ if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
if not nvcc.have_tensorcore(tvm.gpu(0).compute_version):
def test_tensor_core_batch_conv():
- if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"):
+ if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
print("skip because cuda is not enabled..")
return
if not nvcc.have_tensorcore(tvm.gpu(0).compute_version):
def prepare_test_libs(base_path):
target = "llvm -target=asmjs-unknown-emscripten -system-lib"
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
raise RuntimeError("Target %s is not enabled" % target)
n = tvm.var("n")
A = tvm.placeholder((n,), name='A')
proxy_port = 9090
def test_rpc_array():
- if not tvm.module.enabled("rpc"):
+ if not tvm.runtime.enabled("rpc"):
return
# graph
n = tvm.convert(1024)
remote = rpc.connect(proxy_host, proxy_port, key="js")
target = "llvm -target=asmjs-unknown-emscripten -system-lib"
def check_remote():
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
print("Skip because %s is not enabled" % target)
return
temp = util.tempdir()
import numpy as np
def test_local_gemm():
- if not tvm.module.enabled("opengl"):
+ if not tvm.runtime.enabled("opengl"):
return
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
nn = 1024
import numpy as np
def test_local_multi_stage():
- if not tvm.module.enabled("opengl"):
+ if not tvm.runtime.enabled("opengl"):
return
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
n = tvm.var("n")
from tvm.contrib import util, emscripten
def test_local_save_load():
- if not tvm.module.enabled("opengl"):
+ if not tvm.runtime.enabled("opengl"):
return
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
n = tvm.var("n")
temp = util.tempdir()
path_so = temp.relpath("myadd.so")
f.export_library(path_so)
- f1 = tvm.module.load(path_so)
+ f1 = tvm.runtime.load_module(path_so)
f1(a, b, c)
tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy())
a_np, b_np, c_np, d_np = get_ref_data()
def check_device(device):
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
b_np = np.maximum(b_np, 0.0)
def check_device(device):
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
b_np = np.maximum(b_np, 0.0)
def check_device(device):
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
b_np = topi.testing.softmax_python(a_np)
def check_device(device):
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
b_np = topi.testing.log_softmax_python(a_np)
def check_device(device):
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
proxy_port = 9090
def try_remote_save_load():
- if not tvm.module.enabled("rpc"):
+ if not tvm.runtime.enabled("rpc"):
return
- if not tvm.module.enabled("opengl"):
+ if not tvm.runtime.enabled("opengl"):
return
- if not tvm.module.enabled("llvm"):
+ if not tvm.runtime.enabled("llvm"):
return
# Build the module.
threshold : tvm.const
Threshold to be a positive prediction.
- variances : tvm.ndarray
+ variances : tvm.nd.NDArray
Variances to be decoded from box regression output.
Returns
shift_np = np.random.uniform(size=(in_channel * channel_multiplier)).astype(Shift.dtype)
def check_device(device):
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
ctx = tvm.context(device, 0)
shift_np = np.random.uniform(size=(in_channel * channel_multiplier)).astype(Shift.dtype)
def check_device(device):
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
ctx = tvm.context(device, 0)
c_np = np.maximum(b_np, 0)
def check_device(device):
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
ctx = tvm.context(device, 0)
a_np, w_np, b_np = get_ref_data()
def check_device(device):
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
a_np, w_np, b_np = get_ref_data()
def check_device(device):
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
b_np = topi.testing.lrn_python(a_np, size, axis, bias, alpha, beta)
def check_device(device):
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
np_nd = get_ref_data()
def check_device(device):
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
np_nd = get_ref_data()
def check_device(device):
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
s = tvm.create_schedule([C.op])
def check_device(device):
- if not tvm.module.enabled(device):
+ if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
# load the module back.
loaded_json = open(temp.relpath("deploy_graph.json")).read()
-loaded_lib = tvm.module.load(path_lib)
+loaded_lib = tvm.runtime.load_module(path_lib)
loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
input_data = tvm.nd.array(np.random.uniform(size=data_shape).astype("float32"))
# The following code loads the host and device module separately and
# re-links them together. We can verify that the newly loaded function works.
#
-fadd1 = tvm.module.load(temp.relpath("myadd.so"))
+fadd1 = tvm.runtime.load_module(temp.relpath("myadd.so"))
if tgt == "cuda":
- fadd1_dev = tvm.module.load(temp.relpath("myadd.ptx"))
+ fadd1_dev = tvm.runtime.load_module(temp.relpath("myadd.ptx"))
fadd1.import_module(fadd1_dev)
if tgt == "rocm":
- fadd1_dev = tvm.module.load(temp.relpath("myadd.hsaco"))
+ fadd1_dev = tvm.runtime.load_module(temp.relpath("myadd.hsaco"))
fadd1.import_module(fadd1_dev)
if tgt.startswith('opencl'):
- fadd1_dev = tvm.module.load(temp.relpath("myadd.cl"))
+ fadd1_dev = tvm.runtime.load_module(temp.relpath("myadd.cl"))
fadd1.import_module(fadd1_dev)
fadd1(a, b, c)
# Currently we support packing of Metal, OpenCL and CUDA modules.
#
fadd.export_library(temp.relpath("myadd_pack.so"))
-fadd2 = tvm.module.load(temp.relpath("myadd_pack.so"))
+fadd2 = tvm.runtime.load_module(temp.relpath("myadd_pack.so"))
fadd2(a, b, c)
tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy())
if hw_backend in ("verilog", "chisel"):
hw_lib = osp.join(cur_path, "..", "hardware", hw_backend, "build", hw_libname)
load_sw()
- m = tvm.module.load(hw_lib, "vta-tsim")
+ m = tvm.runtime.load_module(hw_lib, "vta-tsim")
f = tvm.get_global_func("tvm.vta.tsim.init")
f(m)
if hw_backend in ("verilog", "chisel"):
hw_lib = osp.join(cur_path, "..", "hardware", hw_backend, "build", hw_libname)
load_sw()
- m = tvm.module.load(hw_lib, "vta-tsim")
+ m = tvm.runtime.load_module(hw_lib, "vta-tsim")
f = tvm.get_global_func("tvm.vta.tsim.init")
f(m)
assert lib_hw # make sure to build vta/hardware/chisel
try:
f = tvm.get_global_func("vta.tsim.init")
- m = tvm.module.load(lib_hw[0], "vta-tsim")
+ m = tvm.runtime.load_module(lib_hw[0], "vta-tsim")
f(m)
return lib_hw
except OSError:
opt = parse_arguments()
# Make sure that TVM was compiled with RPC=1
- assert tvm.module.enabled("rpc")
+ assert tvm.runtime.enabled("rpc")
# Read in VTA environment
env = vta.get_env()
# VTA target and execution context
target = env.target if opt.device == "vta" else env.target_vta_cpu
ctx = remote.ext_dev(0) if opt.device == "vta" else remote.cpu(0)
-
+
# Compile Relay program
print("Initial compile...")
relay_prog, params = compile_network(opt, env, target)
tune_tasks(tasks, **tuning_opt)
# Compile kernels with history best records
- with autotvm.tophub.context(target, extra_files=[opt.log_filename]):
+ with autotvm.tophub.context(target, extra_files=[opt.log_filename]):
# Compile network
print("Compiling network with best tuning parameters...")
return Module(n);
}
-TVM_REGISTER_GLOBAL("module.loadfile_vta-tsim")
+TVM_REGISTER_GLOBAL("runtime.module.loadfile_vta-tsim")
.set_body([](TVMArgs args, TVMRetValue* rv) {
*rv = DPIModuleNode::Load(args[0]);
});
if device == "vta":
target = env.target
if env.TARGET not in ["sim", "tsim"]:
- assert tvm.module.enabled("rpc")
+ assert tvm.runtime.enabled("rpc")
program_fpga(remote, bitstream=None)
reconfig_runtime(remote)
elif device == "arm_cpu":
if device == "vta":
target = env.target
if env.TARGET not in ["sim", "tsim"]:
- assert tvm.module.enabled("rpc")
+ assert tvm.runtime.enabled("rpc")
program_fpga(remote, bitstream=None)
reconfig_runtime(remote)
elif device == "arm_cpu":
if device == "vta":
target = env.target
if env.TARGET not in ["sim", "tsim"]:
- assert tvm.module.enabled("rpc")
+ assert tvm.runtime.enabled("rpc")
program_fpga(remote, bitstream=None)
reconfig_runtime(remote)
elif device == "arm_cpu":
w_np = np.random.randint(w_min, w_max, size=w_shape).astype(kernel.dtype)
b_np = np.random.randint(b_min, b_max, size=b_shape).astype(env.acc_dtype)
r_np = topi.testing.conv2d_nchw_python(
- a_np.astype(env.acc_dtype), w_np.astype(env.acc_dtype),
+ a_np.astype(env.acc_dtype), w_np.astype(env.acc_dtype),
(wl.hstride, wl.wstride), wl.hpad, wl.groups).astype(env.acc_dtype)
return a_np, w_np, b_np, r_np
if device == "vta":
target = env.target
if env.TARGET not in ["sim", "tsim"]:
- assert tvm.module.enabled("rpc")
+ assert tvm.runtime.enabled("rpc")
program_fpga(remote, bitstream=None)
reconfig_runtime(remote)
elif device == "arm_cpu":
----------
path : path to bitstream (optional)
"""
- assert tvm.module.enabled("rpc")
+ assert tvm.runtime.enabled("rpc")
remote = rpc.connect(host, port)
program_fpga(remote, path)
def reconfig_rpc_runtime():
"""Reconfig the RPC server runtime
"""
- assert tvm.module.enabled("rpc")
+ assert tvm.runtime.enabled("rpc")
remote = rpc.connect(host, port)
reconfig_runtime(remote)
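A brief usage sketch of these helpers follows; the name of the bitstream-programming wrapper is not shown above, so `program_rpc_bitstream` is an assumed name here, while `reconfig_rpc_runtime` is defined just above:
if env.TARGET == "pynq":
    # Program the FPGA (default bitstream) and reset the RPC server
    # runtime so the new configuration takes effect.
    program_rpc_bitstream()   # hypothetical name for the wrapper documented above
    reconfig_rpc_runtime()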
from vta.top import graph_pack
# Make sure that TVM was compiled with RPC=1
-assert tvm.module.enabled("rpc")
+assert tvm.runtime.enabled("rpc")
######################################################################
# Define the platform and model targets
m.set_input('data', image)
# Perform inference and gather execution statistics
-# More on: https://docs.tvm.ai/api/python/module.html#tvm.module.Module.time_evaluator
+# More on: https://docs.tvm.ai/api/python/module.html#tvm.runtime.Module.time_evaluator
num = 4 # number of times we run module for a single measurement
rep = 3 # number of measurements (we derive std dev from this)
timer = m.module.time_evaluator("run", ctx, number=num, repeat=rep)
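For completeness, a short sketch (assuming `numpy` is imported as `np`, as elsewhere in this tutorial) of how the returned `ProfileResult` is typically summarized:
# Run the timed experiment; `results` holds one value per repeat (in seconds).
tcost = timer()
mean_ms = tcost.mean * 1000
std_ms = np.std(tcost.results) * 1000
print("Inference time: %.2f ms (+/- %.2f ms) per run" % (mean_ms, std_ms))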
if env.TARGET == "pynq":
# Make sure that TVM was compiled with RPC=1
- assert tvm.module.enabled("rpc")
+ assert tvm.runtime.enabled("rpc")
remote = rpc.connect(host, port)
# Reconfigure the JIT runtime
if env.TARGET == "pynq":
# Make sure that TVM was compiled with RPC=1
- assert tvm.module.enabled("rpc")
+ assert tvm.runtime.enabled("rpc")
remote = rpc.connect(host, port)
# Reconfigure the JIT runtime
if env.TARGET == "pynq":
# Make sure that TVM was compiled with RPC=1
- assert tvm.module.enabled("rpc")
+ assert tvm.runtime.enabled("rpc")
remote = rpc.connect(host, port)
# Reconfigure the JIT runtime
if env.TARGET == "pynq":
# Make sure that TVM was compiled with RPC=1
- assert tvm.module.enabled("rpc")
+ assert tvm.runtime.enabled("rpc")
remote = rpc.connect(host, port)
# Reconfigure the JIT runtime
import os
def prepare_test_libs(base_path):
target = "llvm -target=asmjs-unknown-emscripten -system-lib"
- if not tvm.module.enabled(target):
+ if not tvm.runtime.enabled(target):
        raise RuntimeError("Target %s is not enabled" % target)
n = tvm.var("n")
A = tvm.placeholder((n,), name='A')
*/
this.systemLib = function() {
if (typeof systemFunc.fGetSystemLib === "undefined") {
- systemFunc.fGetSystemLib = getGlobalFunc("module._GetSystemLib");
+ systemFunc.fGetSystemLib = getGlobalFunc("runtime.SystemLib");
}
return systemFunc.fGetSystemLib();
};