From: Tianqi Chen
Date: Tue, 28 Jul 2020 20:18:14 +0000 (-0700)
Subject: Correct runtime.load_module (#6161)
X-Git-Tag: upstream/0.7.0~341
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=1e9e4b9fee46119c8bf52d8ea5d58301fe273780;p=platform%2Fupstream%2Ftvm.git

Correct runtime.load_module (#6161)
---

diff --git a/docs/deploy/hls.rst b/docs/deploy/hls.rst
index 64717ed..da1721d 100644
--- a/docs/deploy/hls.rst
+++ b/docs/deploy/hls.rst
@@ -64,11 +64,11 @@ We use two python scripts for this tutorial.
 
     tgt="sdaccel"
 
-    fadd = tvm.runtime.load("myadd.so")
+    fadd = tvm.runtime.load_module("myadd.so")
     if os.environ.get("XCL_EMULATION_MODE"):
-        fadd_dev = tvm.runtime.load("myadd.xclbin")
+        fadd_dev = tvm.runtime.load_module("myadd.xclbin")
     else:
-        fadd_dev = tvm.runtime.load("myadd.awsxclbin")
+        fadd_dev = tvm.runtime.load_module("myadd.awsxclbin")
     fadd.import_module(fadd_dev)
 
     ctx = tvm.context(tgt, 0)
diff --git a/docs/dev/introduction_to_module_serialization.rst b/docs/dev/introduction_to_module_serialization.rst
index 78f6d71..5451b84 100644
--- a/docs/dev/introduction_to_module_serialization.rst
+++ b/docs/dev/introduction_to_module_serialization.rst
@@ -53,7 +53,7 @@ Let us build one ResNet-18 workload for GPU as an example first.
    resnet18_lib.export_library(path_lib)
 
    # load it back
-   loaded_lib = tvm.runtime.load(path_lib)
+   loaded_lib = tvm.runtime.load_module(path_lib)
    assert loaded_lib.type_key == "library"
    assert loaded_lib.imported_modules[0].type_key == "cuda"
 
diff --git a/docs/dev/relay_bring_your_own_codegen.rst b/docs/dev/relay_bring_your_own_codegen.rst
index 0cced36..4d761bf 100644
--- a/docs/dev/relay_bring_your_own_codegen.rst
+++ b/docs/dev/relay_bring_your_own_codegen.rst
@@ -905,7 +905,7 @@ We also need to register this function to enable the corresponding Python API:
   TVM_REGISTER_GLOBAL("module.loadbinary_examplejson")
   .set_body_typed(ExampleJsonModule::LoadFromBinary);
 
-The above registration means when users call ``tvm.runtime.load(lib_path)`` API and the exported library has an ExampleJSON stream, our ``LoadFromBinary`` will be invoked to create the same customized runtime module.
+The above registration means when users call ``tvm.runtime.load_module(lib_path)`` API and the exported library has an ExampleJSON stream, our ``LoadFromBinary`` will be invoked to create the same customized runtime module.
 
 In addition, if you want to support module creation directly from an ExampleJSON file, you can also implement a simple function and register a Python API as follows:
 
@@ -930,7 +930,7 @@ In addition, if you want to support module creation directly from an ExampleJSON
       *rv = ExampleJsonModule::Create(args[0]);
     });
 
-It means users can manually write/modify an ExampleJSON file, and use Python API ``tvm.runtime.load("mysubgraph.examplejson", "examplejson")`` to construct a customized module.
+It means users can manually write/modify an ExampleJSON file, and use Python API ``tvm.runtime.load_module("mysubgraph.examplejson", "examplejson")`` to construct a customized module.
 
 *******
 Summary
@@ -954,7 +954,7 @@ In summary, here is a checklist for you to refer:
   * ``Run`` to execute a subgraph.
   * Register a runtime creation API.
   * ``SaveToBinary`` and ``LoadFromBinary`` to serialize/deserialize customized runtime module.
-  * Register ``LoadFromBinary`` API to support ``tvm.runtime.load(your_module_lib_path)``.
+  * Register ``LoadFromBinary`` API to support ``tvm.runtime.load_module(your_module_lib_path)``.
   * (optional) ``Create`` to support customized runtime module construction from subgraph file in your representation.
 
 * An annotator to annotate a user Relay program to make use of your compiler and runtime (TBA).
diff --git a/rust/tvm/examples/resnet/src/build_resnet.py b/rust/tvm/examples/resnet/src/build_resnet.py
index a09a0c3..1142f99 100644
--- a/rust/tvm/examples/resnet/src/build_resnet.py
+++ b/rust/tvm/examples/resnet/src/build_resnet.py
@@ -112,7 +112,7 @@ def download_img_labels():
 def test_build(build_dir):
     """ Sanity check with random input"""
     graph = open(osp.join(build_dir, "deploy_graph.json")).read()
-    lib = tvm.runtime.load(osp.join(build_dir, "deploy_lib.so"))
+    lib = tvm.runtime.load_module(osp.join(build_dir, "deploy_lib.so"))
     params = bytearray(open(osp.join(build_dir,"deploy_param.params"), "rb").read())
     input_data = tvm.nd.array(np.random.uniform(size=data_shape).astype("float32"))
     ctx = tvm.cpu()
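
Note (not part of the patch): the snippet below is a minimal sketch of the corrected call, showing that a module exported with export_library can be reloaded with tvm.runtime.load_module, the name this patch substitutes for the tvm.runtime.load spelling previously used in the docs. It assumes an LLVM-enabled TVM build; the kernel and the file name myadd.so are illustrative only.

    # Sketch only (assumes an LLVM-enabled TVM build): compile a trivial
    # add-one kernel, export it as a shared library, and reload it with
    # tvm.runtime.load_module as the corrected docs describe.
    import numpy as np
    import tvm
    from tvm import te

    n = te.var("n")
    A = te.placeholder((n,), name="A", dtype="float32")
    B = te.compute(A.shape, lambda i: A[i] + 1.0, name="B")
    s = te.create_schedule(B.op)
    fadd = tvm.build(s, [A, B], target="llvm", name="myadd")

    fadd.export_library("myadd.so")               # serialize the compiled module
    loaded = tvm.runtime.load_module("myadd.so")  # corrected API name

    ctx = tvm.cpu(0)
    a = tvm.nd.array(np.arange(10, dtype="float32"), ctx)
    b = tvm.nd.array(np.zeros(10, dtype="float32"), ctx)
    loaded["myadd"](a, b)                         # run the reloaded kernel
    np.testing.assert_allclose(b.asnumpy(), a.asnumpy() + 1.0)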