[REFACTOR][RELAY] Replace build_config with PassContext (#5698)
author Zhi <5145158+zhiics@users.noreply.github.com>
Sat, 30 May 2020 04:59:35 +0000 (21:59 -0700)
committer GitHub <noreply@github.com>
Sat, 30 May 2020 04:59:35 +0000 (21:59 -0700)
72 files changed:
apps/android_camera/models/prepare_model.py
apps/benchmark/arm_cpu_imagenet_bench.py
apps/benchmark/gpu_imagenet_bench.py
apps/benchmark/mobile_gpu_imagenet_bench.py
apps/bundle_deploy/build_model.py
apps/sgx/src/build_model.py
golang/sample/gen_mobilenet_lib.py
python/tvm/relay/frontend/common.py
python/tvm/relay/quantize/_calibrate.py
python/tvm/relay/transform/transform.py
rust/frontend/examples/resnet/src/build_resnet.py
src/relay/backend/build_module.cc
tests/cpp/relay_transform_sequential.cc
tests/python/frontend/caffe2/test_forward.py
tests/python/frontend/coreml/test_forward.py
tests/python/frontend/keras/test_forward.py
tests/python/frontend/mxnet/test_forward.py
tests/python/frontend/mxnet/test_qnn_ops_utils.py
tests/python/frontend/onnx/test_forward.py
tests/python/frontend/pytorch/qnn_test.py
tests/python/frontend/pytorch/test_forward.py
tests/python/frontend/tensorflow/test_bn_dynamic.py
tests/python/frontend/tensorflow/test_forward.py
tests/python/frontend/tflite/test_forward.py
tests/python/nightly/quantization/test_quantization_accuracy.py
tests/python/relay/benchmarking/benchmark_vm.py
tests/python/relay/test_backend_compile_engine.py
tests/python/relay/test_backend_graph_runtime.py
tests/python/relay/test_cpp_build_module.py
tests/python/relay/test_external_codegen.py
tests/python/relay/test_memory_passes.py
tests/python/relay/test_op_fast_math.py
tests/python/relay/test_op_level2.py
tests/python/relay/test_op_qnn_conv2d.py
tests/python/relay/test_op_qnn_dense.py
tests/python/relay/test_op_qnn_dequantize.py
tests/python/relay/test_op_qnn_quantize.py
tests/python/relay/test_op_qnn_requantize.py
tests/python/relay/test_pass_annotate_target.py
tests/python/relay/test_pass_fast_math.py
tests/python/relay/test_pass_fold_constant.py
tests/python/relay/test_pass_manager.py
tests/python/relay/test_pass_partition_graph.py
tests/python/relay/test_simplify_fc_transpose.py
tests/python/relay/test_sparse_dense_convert.py
tests/python/unittest/test_runtime_module_export.py
tests/python/unittest/test_target_codegen_blob.py
tutorials/autotvm/tune_relay_arm.py
tutorials/autotvm/tune_relay_cuda.py
tutorials/autotvm/tune_relay_mobile_gpu.py
tutorials/autotvm/tune_relay_x86.py
tutorials/dev/relay_pass_infra.py
tutorials/frontend/build_gcn.py
tutorials/frontend/deploy_model_on_android.py
tutorials/frontend/deploy_model_on_rasp.py
tutorials/frontend/deploy_prequantized.py
tutorials/frontend/deploy_prequantized_tflite.py
tutorials/frontend/deploy_ssd_gluoncv.py
tutorials/frontend/from_caffe2.py
tutorials/frontend/from_coreml.py
tutorials/frontend/from_darknet.py
tutorials/frontend/from_keras.py
tutorials/frontend/from_mxnet.py
tutorials/frontend/from_onnx.py
tutorials/frontend/from_pytorch.py
tutorials/frontend/from_tensorflow.py
tutorials/frontend/from_tflite.py
tutorials/relay_quick_start.py
vta/scripts/tune_resnet.py
vta/tutorials/autotvm/tune_relay_vta.py
vta/tutorials/frontend/deploy_classification.py
vta/tutorials/frontend/deploy_detection.py

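Nearly every hunk below applies the same mechanical rewrite: a `with relay.build_config(...)` scope becomes a `with tvm.transform.PassContext(...)` scope with identical arguments. A minimal sketch of the before/after, assuming a TVM build that includes this refactor (the workload and target are illustrative):

import tvm
from tvm import relay
from tvm.relay import testing

mod, params = testing.resnet.get_workload(num_layers=18, batch_size=1)

# Old spelling, kept only as a deprecated alias after this commit:
#     with relay.build_config(opt_level=3):
#         graph, lib, params = relay.build(mod, "llvm", params=params)

# New spelling:
with tvm.transform.PassContext(opt_level=3):
    graph, lib, params = relay.build(mod, "llvm", params=params)
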
diff --git a/apps/android_camera/models/prepare_model.py b/apps/android_camera/models/prepare_model.py
index 36674d2..703a465 100644
@@ -87,7 +87,7 @@ def main(model_str, output_path):
     except FileExistsError:
         pass
     print("building...")
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         graph, lib, params = relay.build(net, target, target_host=target_host, params=params)
     print("dumping lib...")
     lib.export_library(output_path_str + '/' + 'deploy_lib_cpu.so', ndk.create_shared)
diff --git a/apps/benchmark/arm_cpu_imagenet_bench.py b/apps/benchmark/arm_cpu_imagenet_bench.py
index 53b6168..f319d5a 100644
@@ -39,7 +39,7 @@ def evaluate_network(network, target, target_host, repeat):
     net, params, input_shape, output_shape = get_network(network, batch_size=1)
 
     print_progress("%-20s building..." % network)
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         graph, lib, params = relay.build(
             net, target=target, target_host=target_host, params=params)
 
diff --git a/apps/benchmark/gpu_imagenet_bench.py b/apps/benchmark/gpu_imagenet_bench.py
index 0023700..a3df2c4 100644
@@ -33,7 +33,7 @@ from util import get_network
 def benchmark(network, target):
     net, params, input_shape, output_shape = get_network(network, batch_size=1)
 
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         graph, lib, params = relay.build(net, target=target, params=params)
 
     # create runtime
diff --git a/apps/benchmark/mobile_gpu_imagenet_bench.py b/apps/benchmark/mobile_gpu_imagenet_bench.py
index 4f93a0d..83127ff 100644
@@ -38,7 +38,7 @@ def evaluate_network(network, target, target_host, dtype, repeat):
     net, params, input_shape, output_shape = get_network(network, batch_size=1, dtype=dtype)
 
     print_progress("%-20s building..." % network)
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         graph, lib, params = relay.build(
             net, target=target, target_host=target_host, params=params)
 
diff --git a/apps/bundle_deploy/build_model.py b/apps/bundle_deploy/build_model.py
index 63d658e..1d415cd 100644
@@ -33,7 +33,7 @@ def build_module(opts):
     func = mod["main"]
     func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs)
 
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         graph, lib, params = relay.build(
             func, 'llvm --system-lib', params=params)
 
diff --git a/apps/sgx/src/build_model.py b/apps/sgx/src/build_model.py
index 6e0933e..b988574 100755
@@ -37,7 +37,7 @@ def main():
     net, params = relay.testing.resnet.get_workload(
         layers=18, batch_size=dshape[0], image_shape=dshape[1:])
 
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         graph, lib, params = relay.build(
             net, 'llvm --system-lib', params=params)
 
diff --git a/golang/sample/gen_mobilenet_lib.py b/golang/sample/gen_mobilenet_lib.py
index 8becd07..d4dcf21 100644
@@ -16,7 +16,7 @@
 # under the License.
 
 import os
-from tvm import relay
+from tvm import relay, transform
 from tvm.contrib.download import download_testdata
 
 
@@ -77,7 +77,7 @@ mod, params = relay.frontend.from_tflite(tflite_model,
 target = 'llvm'
 
 # Build with Relay
-with relay.build_config(opt_level=3):
+with transform.PassContext(opt_level=3):
     graph, lib, params = relay.build_module.build(
         mod, target, params=params)
 
diff --git a/python/tvm/relay/frontend/common.py b/python/tvm/relay/frontend/common.py
index e86890f..05222c6 100644
@@ -505,7 +505,7 @@ def infer_value(input_val, params, mod=None):
         assert all(var.name_hint in params.keys() for var in analysis.free_vars(
             input_val)), "All inputs to infer must be available in params."
         func = _function.Function(analysis.free_vars(input_val), input_val)
-        with tvm.relay.build_config(opt_level=0):
+        with tvm.transform.PassContext(opt_level=0):
             graph, lib, params = tvm.relay.build(func, target="llvm", params=params)
         ctx = tvm.cpu(0)
         m = graph_runtime.create(graph, lib, ctx)
diff --git a/python/tvm/relay/quantize/_calibrate.py b/python/tvm/relay/quantize/_calibrate.py
index 9794698..59ee51b 100644
@@ -28,7 +28,6 @@ from . import quantize
 from .. import op as _op
 from .. import expr as _expr
 from .. import analysis as _analysis
-from .. import transform as _transform
 from .. import build_module as _build_module
 from ...contrib import graph_runtime
 from .kl_divergence import _find_scale_by_kl
@@ -45,7 +44,7 @@ def _get_profile_runtime(mod):
         target = 'llvm'
         ctx = tvm.context(target)
 
-    with _transform.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         graph, lib, params = _build_module.build(func, target=target)
     runtime = graph_runtime.create(graph, lib, ctx)
     runtime.set_input(**params)
diff --git a/python/tvm/relay/transform/transform.py b/python/tvm/relay/transform/transform.py
index 19ddb32..8f4ec10 100644
@@ -21,6 +21,7 @@ Relay pass transformation infrastructure.
 import types
 import inspect
 import functools
+import warnings
 
 import tvm.ir
 from tvm import te
@@ -34,7 +35,9 @@ def build_config(opt_level=2,
                  required_pass=None,
                  disabled_pass=None,
                  trace=None):
-    """Configure the build behavior by setting config variables.
+    """Configure the build behavior by setting config variables. This function
+    will be deprecated in TVM v0.7. Instead, we should directly use
+    tvm.transform.PassContext.
 
     Parameters
     ----------
@@ -72,8 +75,9 @@ def build_config(opt_level=2,
     pass_context: PassContext
         The pass context for optimizations.
     """
-    return tvm.ir.transform.PassContext(opt_level, required_pass,
-                                        disabled_pass, trace)
+    warnings.warn("relay.build_config will be deprecated. Please use \
+                  tvm.transform.PassContext directly", DeprecationWarning)
+    return tvm.transform.PassContext(opt_level, required_pass, disabled_pass, trace)
 
 
 @tvm._ffi.register_object("relay.FunctionPass")
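
Since build_config now merely wraps tvm.transform.PassContext and emits a DeprecationWarning, existing callers keep working during the transition. A sketch of the expected behavior, assuming this commit is applied (DeprecationWarning is silenced by default in Python, hence the filter):

import warnings
from tvm import relay

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    pass_ctx = relay.build_config(opt_level=2)  # still works, but warns

assert any(issubclass(w.category, DeprecationWarning) for w in caught)
with pass_ctx:  # the returned object is a regular PassContext
    pass
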
diff --git a/rust/frontend/examples/resnet/src/build_resnet.py b/rust/frontend/examples/resnet/src/build_resnet.py
index 49c67bf..a09a0c3 100644
@@ -75,8 +75,8 @@ def build(target_dir):
             num_layers=18, batch_size=batch_size, image_shape=image_shape)
 
     # compile the model
-    with relay.build_config(opt_level=opt_level):
-            graph, lib, params = relay.build_module.build(net, target, params=params)
+    with tvm.transform.PassContext(opt_level=opt_level):
+        graph, lib, params = relay.build_module.build(net, target, params=params)
 
     # save the model artifacts
     lib.save(deploy_lib)
diff --git a/src/relay/backend/build_module.cc b/src/relay/backend/build_module.cc
index abce068..7aad766 100644
@@ -304,9 +304,8 @@ class RelayBuildModule : public runtime::ModuleNode {
     // Handle heterogeneous compilation.
     transform::PassContext pass_ctx = PassContext::Current();
     if (targets_.size() > 1) {
-      Optional<IntImm> opt_fallback_dev =
-          pass_ctx->GetConfig("relay.fallback_device_type",
-                              IntImm(runtime::DataType::Int(32), static_cast<int>(kDLCPU)));
+      Optional<Integer> opt_fallback_dev =
+          pass_ctx->GetConfig("relay.fallback_device_type", Integer(static_cast<int>(kDLCPU)));
       auto fallback_dev = opt_fallback_dev.value();
       CHECK_GT(fallback_dev->value, 0U);
       relay_module = RunDeviceAnnotationPass(relay_module, fallback_dev->value);
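
The C++ change above reads the heterogeneous fallback device from PassContext config as an Integer rather than an IntImm. For reference, a hedged sketch of setting that key from Python, assuming a TVM build whose PassContext constructor accepts a config dict (device type 2 is kDLGPU):

import tvm

with tvm.transform.PassContext(opt_level=3,
                               config={"relay.fallback_device_type": 2}):
    pass_ctx = tvm.transform.PassContext.current()
    print(pass_ctx.config["relay.fallback_device_type"])  # 2
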
diff --git a/tests/cpp/relay_transform_sequential.cc b/tests/cpp/relay_transform_sequential.cc
index 60d3a5e..f08d557 100644
@@ -70,7 +70,7 @@ TEST(Relay, Sequential) {
   auto mod = IRModule::FromExpr(func);
   auto pass_ctx = relay::transform::PassContext::Create();
   pass_ctx->opt_level = 3;
-  pass_ctx->config.Set("relay.fallback_device_type", IntImm(DataType::Int(32), 1));
+  pass_ctx->config.Set("relay.fallback_device_type", Integer(1));
   {
     tvm::With<relay::transform::PassContext> ctx_scope(pass_ctx);
     tvm::With<tvm::Target> tctx(tvm::Target::Create("llvm"));
diff --git a/tests/python/frontend/caffe2/test_forward.py b/tests/python/frontend/caffe2/test_forward.py
index f052872..50a8781 100644
@@ -43,7 +43,7 @@ def get_tvm_output(model,
     dtype_dict = {input_names: input_data.dtype}
     mod, params = relay.frontend.from_caffe2(
         model.init_net, model.predict_net, shape_dict, dtype_dict)
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         graph, lib, params = relay.build(mod, target, params=params)
 
     m = graph_runtime.create(graph, lib, ctx)
diff --git a/tests/python/frontend/coreml/test_forward.py b/tests/python/frontend/coreml/test_forward.py
index 3a15638..179f5b4 100644
@@ -33,7 +33,7 @@ import model_zoo
 
 def get_tvm_output(func, x, params, target, ctx,
                    out_shape=(1, 1000), input_name='image', dtype='float32'):
-    with relay.transform.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         graph, lib, params = relay.build(func, target, params=params)
     m = graph_runtime.create(graph, lib, ctx)
     # set inputs
@@ -76,7 +76,7 @@ def run_tvm_graph(coreml_model, target, ctx, input_data, input_name, output_shap
         dtype_dict = {input_name: input_data.dtype}
 
     mod, params = relay.frontend.from_coreml(coreml_model, shape_dict)
-    with relay.transform.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         graph, lib, params = relay.build(mod, target, params=params)
 
     from tvm.contrib import graph_runtime
diff --git a/tests/python/frontend/keras/test_forward.py b/tests/python/frontend/keras/test_forward.py
index ed0181f..9b963c3 100644
@@ -84,7 +84,7 @@ def verify_keras_frontend(keras_model, need_transpose=True, layout='NCHW'):
     def get_tvm_output(xs, target, ctx, dtype='float32'):
         shape_dict = {name: x.shape for (name, x) in zip(keras_model.input_names, xs)}
         mod, params = relay.frontend.from_keras(keras_model, shape_dict, layout=layout)
-        with relay.transform.build_config(opt_level=2):
+        with tvm.transform.PassContext(opt_level=2):
             graph, lib, params = relay.build(mod,
                                              target,
                                              params=params)
diff --git a/tests/python/frontend/mxnet/test_forward.py b/tests/python/frontend/mxnet/test_forward.py
index 6d36ea3..5ed2fb8 100644
@@ -66,7 +66,7 @@ def verify_mxnet_frontend_impl(mx_symbol,
                                                     shape_dict,
                                                     arg_params=args,
                                                     aux_params=auxs)
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             graph, lib, params = relay.build(mod, target, params=params)
         m = graph_runtime.create(graph, lib, ctx)
         # set inputs
diff --git a/tests/python/frontend/mxnet/test_qnn_ops_utils.py b/tests/python/frontend/mxnet/test_qnn_ops_utils.py
index d130eef..541162d 100644
@@ -15,8 +15,8 @@
 # specific language governing permissions and limitations
 # under the License.
 
-import tvm
 import numpy as np
+import tvm
 from tvm import relay
 from tvm.contrib import graph_runtime
 from tvm.relay.frontend.mxnet_qnn_op_utils import dequantize_mxnet_min_max, \
@@ -39,7 +39,7 @@ def test_mkldnn_dequantize():
                                                       in_dtype=in_dtype)
         mod = relay.Function(relay.analysis.free_vars(dequantized_output), dequantized_output)
         mod = tvm.IRModule.from_expr(mod)
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             graph, lib, params = relay.build(mod, "llvm", params=None)
             rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
             rt_mod.set_input(input_data=in_data)
@@ -93,7 +93,7 @@ def test_mkldnn_quantize():
                                                         out_dtype=out_dtype)
         mod = relay.Function(relay.analysis.free_vars(quantized_output), quantized_output)
         mod = tvm.IRModule.from_expr(mod)
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             graph, lib, params = relay.build(mod, "llvm", params=None)
             rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
             rt_mod.set_input(input_data=in_data)
diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py
index cd01294..edc33b7 100644
@@ -65,7 +65,7 @@ def get_tvm_output(graph_def, input_data, target, ctx, output_shape=None, output
 
     mod, params = relay.frontend.from_onnx(graph_def, shape_dict, opset=opset)
 
-    with relay.build_config(opt_level=1):
+    with tvm.transform.PassContext(opt_level=1):
         graph, lib, params = relay.build(mod,
                                          target,
                                          params=params)
diff --git a/tests/python/frontend/pytorch/qnn_test.py b/tests/python/frontend/pytorch/qnn_test.py
index bf5fa98..551cdc4 100644
@@ -41,7 +41,7 @@ def get_tvm_runtime(script_module, input_name, ishape):
     input_shapes = [(input_name, ishape)]
     mod, params = relay.frontend.from_pytorch(script_module, input_shapes)
 
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         # test on only cpu for now, torch cannot run quant models on cuda
         # also not to make CI too slow
         json, lib, params = relay.build(mod, target="llvm", params=params)
diff --git a/tests/python/frontend/pytorch/test_forward.py b/tests/python/frontend/pytorch/test_forward.py
index 6159bb8..f6edbf1 100644
@@ -176,7 +176,7 @@ def verify_model(model_name, input_data=[],
     compiled_input = dict(zip(input_names,
                               [inp.cpu().numpy() for inp in baseline_input]))
 
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         for target, ctx in ctx_list:
             relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params)
             relay_model = graph_runtime.create(relay_graph, relay_lib, ctx)
@@ -2294,7 +2294,7 @@ def test_forward_pretrained_bert_base_uncased():
     # ----------------------------
 
     target = 'llvm'
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params)
 
     ######################################################################
diff --git a/tests/python/frontend/tensorflow/test_bn_dynamic.py b/tests/python/frontend/tensorflow/test_bn_dynamic.py
index a2d6903..e80d774 100644
@@ -50,7 +50,7 @@ def verify_fused_batch_norm(shape):
             continue
         mod, params = relay.frontend.from_tensorflow(constant_graph,
                                                      outputs=['output'])
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             graph, lib, params = relay.build(mod,
                                              target=device,
                                              params=params)
diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py
index c6a285c..89a0335 100644
@@ -123,7 +123,7 @@ def run_tvm_graph(graph_def, input_data, input_node, num_output=1,
         result = ex.evaluate()(*inputs)
         return vmobj_to_list(result)
     else:
-        with relay.build_config(opt_level=opt_level):
+        with tvm.transform.PassContext(opt_level=opt_level):
             graph, lib, params = relay.build(mod, target, target_host, params)
 
         ctx = tvm.context(target, 0)
@@ -2307,7 +2307,7 @@ def test_forward_ptb():
                       'Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_c': 'float32',
                       'Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_h': 'float32'}
         target = 'llvm'
-        with relay.build_config(opt_level=0):
+        with tvm.transform.PassContext(opt_level=0):
             graph, lib, params = relay.build(mod,
                                              target,
                                              params=params)
diff --git a/tests/python/frontend/tflite/test_forward.py b/tests/python/frontend/tflite/test_forward.py
index a68fd90..24b82c6 100644
@@ -109,7 +109,7 @@ def run_tvm_graph(tflite_model_buf, input_data, input_node, num_output=1, target
                                              shape_dict=shape_dict,
                                              dtype_dict=dtype_dict)
 
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         graph, lib, params = relay.build(mod, target, params=params)
 
     ctx = tvm.context(target, 0)
diff --git a/tests/python/nightly/quantization/test_quantization_accuracy.py b/tests/python/nightly/quantization/test_quantization_accuracy.py
index 4818cc6..d4b55f1 100644
@@ -66,7 +66,7 @@ def get_model(model_name, batch_size, qconfig, target=None, original=False, simu
     mod, params = relay.frontend.from_mxnet(gluon_model, {"data": data_shape})
     net = mod['main']
 
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         qfunc = relay.quantize.prerequisite_optimize(net, params=params)
     logging.debug('original')
     logging.debug(qfunc.astext(show_meta_data=False))
@@ -83,7 +83,7 @@ def get_model(model_name, batch_size, qconfig, target=None, original=False, simu
 
 
 def eval_acc(model, dataset, batch_fn, target=tvm.target.cuda(), ctx=tvm.gpu(), log_interval=100):
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         graph, lib, params = relay.build(model, target)
     # create runtime module
     m = tvm.contrib.graph_runtime.create(graph, lib, ctx)
diff --git a/tests/python/relay/benchmarking/benchmark_vm.py b/tests/python/relay/benchmarking/benchmark_vm.py
index 1e9030c..a6e05be 100644
@@ -36,7 +36,7 @@ def benchmark_execution(mod,
                         model="unknown"):
     def get_graph_runtime_output(mod, data, params, target, ctx,
                                  dtype='float32', number=2, repeat=20):
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             graph, lib, params = relay.build(mod, target, params=params)
 
         m = graph_runtime.create(graph, lib, ctx)
@@ -59,7 +59,7 @@ def benchmark_execution(mod,
 
     def get_vm_output(mod, data, params, target, ctx, dtype='float32',
                       number=2, repeat=20):
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             exe = vm.compile(mod, target, params=params)
             rly_vm = vm_rt.VirtualMachine(exe)
             rly_vm.init(ctx)
diff --git a/tests/python/relay/test_backend_compile_engine.py b/tests/python/relay/test_backend_compile_engine.py
index eb018fe..1b4e08f 100644
@@ -184,7 +184,7 @@ def test_compile_placeholder_bypass():
     z = relay.var("z", shape=(2, 3))
     result = relay.Tuple([x, relay.op.concatenate([y, z], axis=0)])
     func = relay.Function(relay.analysis.free_vars(result), result)
-    with relay.build_config(opt_level=0):
+    with tvm.transform.PassContext(opt_level=0):
        graph, lib, params = relay.build(tvm.IRModule.from_expr(func), 'llvm')
 
 
diff --git a/tests/python/relay/test_backend_graph_runtime.py b/tests/python/relay/test_backend_graph_runtime.py
index 226d5ba..f0785bc 100644
@@ -166,7 +166,7 @@ def test_gru_like():
     z = unit(rnn_dim)
 
     for target, ctx in ctx_list():
-        with relay.build_config(opt_level=2):
+        with tvm.transform.PassContext(opt_level=2):
             graph, lib, params = relay.build(tvm.IRModule.from_expr(z), target)
             m = graph_runtime.create(graph, lib, ctx)
             m.set_input("X", tvm.nd.array(x.astype(dtype)))
diff --git a/tests/python/relay/test_cpp_build_module.py b/tests/python/relay/test_cpp_build_module.py
index 171b6b0..8d54384 100644
@@ -115,7 +115,7 @@ def test_fp16_conversion():
             X = tvm.nd.array(n * np.random.randn(n).astype(src) - n / 2)
 
             # build
-            with relay.build_config(opt_level=1):
+            with tvm.transform.PassContext(opt_level=1):
                 g_json, mmod, params = relay.build(tvm.IRModule.from_expr(func), tgt)
 
             # test
diff --git a/tests/python/relay/test_external_codegen.py b/tests/python/relay/test_external_codegen.py
index 3797910..c449ce3 100644
@@ -49,7 +49,8 @@ def check_result(mod, map_inputs, out_shape, result, tol=1e-5, target="llvm",
         return lib
 
     def check_vm_result():
-        with relay.build_config(opt_level=3, disabled_pass=["AlterOpLayout"]):
+        with tvm.transform.PassContext(opt_level=3,
+                                       disabled_pass=["AlterOpLayout"]):
             exe = relay.vm.compile(mod, target=target)
         code, lib = exe.save()
         lib = update_lib(lib)
@@ -60,7 +61,8 @@ def check_result(mod, map_inputs, out_shape, result, tol=1e-5, target="llvm",
         tvm.testing.assert_allclose(out.asnumpy(), result, rtol=tol, atol=tol)
 
     def check_graph_runtime_result():
-        with relay.build_config(opt_level=3, disabled_pass=["AlterOpLayout"]):
+        with tvm.transform.PassContext(opt_level=3,
+                                       disabled_pass=["AlterOpLayout"]):
             json, lib, _ = relay.build(mod, target=target)
         lib = update_lib(lib)
         rt_mod = tvm.contrib.graph_runtime.create(json, lib, ctx)
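
disabled_pass removes a pass that the chosen opt_level would otherwise schedule; the external-codegen and annotate-target tests disable AlterOpLayout so partitioned subgraphs keep their original layouts. A minimal sketch under the same assumption (shapes are illustrative):

import tvm
from tvm import relay

x = relay.var("x", shape=(1, 3, 224, 224))
w = relay.var("w", shape=(16, 3, 3, 3))
mod = tvm.IRModule.from_expr(relay.Function([x, w], relay.nn.conv2d(x, w)))

# AlterOpLayout normally runs at opt_level=3; disable it explicitly.
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
    graph, lib, params = relay.build(mod, target="llvm")
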
diff --git a/tests/python/relay/test_memory_passes.py b/tests/python/relay/test_memory_passes.py
index 70e7086..dc16865 100644
@@ -37,7 +37,7 @@ def check_memory_plan(func, check_fn):
     no_plan_result = ex.evaluate(mod['main'])(*args)
 
     # Compute with memory planning.
-    with relay.build_config(opt_level=1, disabled_pass=["MemoryPlan"]):
+    with tvm.transform.PassContext(opt_level=1, disabled_pass=["MemoryPlan"]):
         plan_result = ex.evaluate(mod['main'])(*args)
 
     # Compute Python result.
diff --git a/tests/python/relay/test_op_fast_math.py b/tests/python/relay/test_op_fast_math.py
index 215b83e..a771d29 100644
@@ -34,7 +34,7 @@ def test_fastmath():
         func = relay.Function([x], y)
         mod = tvm.IRModule.from_expr(func)
 
-        with relay.build_config(opt_level=3, required_pass=['FastMath']):
+        with tvm.transform.PassContext(opt_level=3, required_pass=['FastMath']):
             graph, lib, params = relay.build(mod, target="llvm", params=None)
 
         # Check that the op related to fast math have been convered to function in lib
diff --git a/tests/python/relay/test_op_level2.py b/tests/python/relay/test_op_level2.py
index 68eced3..3e8720d 100644
@@ -262,7 +262,7 @@ def test_conv2d_run():
         with open(temp.relpath("temp.log"), "w") as log_file:
             log_file.write(test_schedule)
         with autotvm.apply_history_best(temp.relpath("temp.log")):
-            with relay.build_config(opt_level=3):
+            with tvm.transform.PassContext(opt_level=3):
                 print('Compiling...')
                 graph_json, mod, params = tvm.relay.build(mod, target="llvm -device=arm_cpu")
 
@@ -356,7 +356,7 @@ def test_conv2d_winograd():
             data.astype(out_dtype), kernel.astype(out_dtype), 1, padding,
             groups=groups)
 
-        with WinogradFallback(), relay.build_config(opt_level=3):
+        with WinogradFallback(), tvm.transform.PassContext(opt_level=3):
             for target, ctx in ctx_list():
                 if target != 'cuda':
                     continue
@@ -578,7 +578,7 @@ def test_conv3d_winograd():
             data.astype(out_dtype), kernel.astype(out_dtype), 1, padding,
             groups=groups)
 
-        with WinogradFallback(), relay.build_config(opt_level=3):
+        with WinogradFallback(), tvm.transform.PassContext(opt_level=3):
             for target, ctx in ctx_list():
                 if target != 'cuda':
                     continue
@@ -1199,7 +1199,7 @@ def test_conv2d_int8_intrinsics():
         wdata = np.random.rand(*kernel_shape) * 10
         parameters = {"weight": tvm.nd.array(wdata.astype(weight_dtype))}
 
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             graph, lib, params = relay.build(func, target, params=parameters)
 
         assembly = lib.get_source("asm")
@@ -1314,7 +1314,7 @@ def test_depthwise_conv2d_int8():
     llvm_version = tvm.target.codegen.llvm_version_major()
     for target in targets:
         if llvm_version >= 8:
-            with relay.build_config(opt_level=3):
+            with tvm.transform.PassContext(opt_level=3):
                 graph, lib, params = relay.build(func, target, params=parameters)
 
 
diff --git a/tests/python/relay/test_op_qnn_conv2d.py b/tests/python/relay/test_op_qnn_conv2d.py
index 6911c52..fcb335f 100644
@@ -182,7 +182,7 @@ def verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape,
 
 
     def get_output(func, golden_inputs):
-        with relay.build_config(opt_level=2):
+        with tvm.transform.PassContext(opt_level=2):
             golden_data, golden_weight = golden_inputs
             params = {'kernel': golden_weight}
             graph, lib, params = relay.build(func, "llvm", params=params)
@@ -655,7 +655,7 @@ def test_tflite_large_irregular():
         golden_data = np.full(data_shape, 127).astype('uint8')
         golden_weight = np.full(kernel_shape, 127).astype('uint8')
 
-        with relay.build_config(opt_level=2):
+        with tvm.transform.PassContext(opt_level=2):
             params = {'kernel': golden_weight}
             graph, lib, params = relay.build(qnn_func, "llvm", params=params)
             mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
@@ -698,7 +698,7 @@ def test_tflite_output_multiplier_greater_than_one():
                                         -1, -1, 1, 1)).reshape(kernel_shape)
         golden_weight = golden_weight.astype('uint8')
 
-        with relay.build_config(opt_level=2):
+        with tvm.transform.PassContext(opt_level=2):
             params = {'kernel': golden_weight}
             graph, lib, params = relay.build(qnn_func, "llvm", params=params)
             mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
@@ -744,7 +744,7 @@ def test_tflite_anistropic_strides():
         golden_weight = np.array((129, 131, 133, 135)).reshape(kernel_shape)
         golden_weight = golden_weight.astype('uint8')
 
-        with relay.build_config(opt_level=2):
+        with tvm.transform.PassContext(opt_level=2):
             params = {'kernel': golden_weight}
             graph, lib, params = relay.build(qnn_func, "llvm", params=params)
             mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
@@ -789,7 +789,7 @@ def test_broadcast_layout():
         func = relay.add(func, bias)
         func = relay.Function(relay.analysis.free_vars(func), func)
         mod = tvm.IRModule.from_expr(func)
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             graph, lib, params = relay.build(mod, "llvm -mcpu=skylake-avx512")
 
 def test_depthwise_depth_multiplier():
diff --git a/tests/python/relay/test_op_qnn_dense.py b/tests/python/relay/test_op_qnn_dense.py
index 3cfcfd1..0ba3210 100644
@@ -167,7 +167,7 @@ def qnn_dense_driver(test_configuration):
     mod = relay.Function(relay.analysis.free_vars(mod), mod)
     mod = tvm.IRModule.from_expr(mod)
     mod = relay.qnn.transform.CanonicalizeOps()(mod)
-    with relay.build_config(opt_level=2):
+    with tvm.transform.PassContext(opt_level=2):
         graph, lib, params = relay.build(mod, "llvm", params=None)
         mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
         mod.set_input(quantized_data_name, test_configuration[quantized_data_name])
diff --git a/tests/python/relay/test_op_qnn_dequantize.py b/tests/python/relay/test_op_qnn_dequantize.py
index febf5c5..3c82b7f 100644
@@ -30,7 +30,7 @@ def quantize_test_driver(in_dtype, quant_args, in_data, verify_output_data):
                                                input_zero_point=input_zero_point)
     mod = relay.Function(relay.analysis.free_vars(quantized_output), quantized_output)
     mod = tvm.IRModule.from_expr(mod)
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         graph, lib, params = relay.build(mod, "llvm", params=None)
         rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
         rt_mod.set_input(input_data=in_data)
diff --git a/tests/python/relay/test_op_qnn_quantize.py b/tests/python/relay/test_op_qnn_quantize.py
index 09b04d8..a284e8b 100644
@@ -32,7 +32,7 @@ def quantize_test_driver(in_dtype, quant_args, axis, out_dtype, in_data, verify_
                                              out_dtype=out_dtype)
     mod = relay.Function(relay.analysis.free_vars(quantized_output), quantized_output)
     mod = tvm.IRModule.from_expr(mod)
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         graph, lib, params = relay.build(mod, "llvm", params=None)
         rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
         rt_mod.set_input(input_data=in_data)
diff --git a/tests/python/relay/test_op_qnn_requantize.py b/tests/python/relay/test_op_qnn_requantize.py
index 8123397..fb52b30 100644
@@ -24,7 +24,7 @@ from tvm.contrib import graph_runtime
 roundings = ["UPWARD", "TONEAREST"]
 
 def verify(mod, goldens):
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         graph, lib, params = relay.build(mod, "llvm", params=None)
         golden_data, golden_output = goldens
         rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
diff --git a/tests/python/relay/test_pass_annotate_target.py b/tests/python/relay/test_pass_annotate_target.py
index 01ba9b6..0583946 100644
@@ -52,7 +52,7 @@ def check_result(mod, map_inputs, out_shape, result, tol=1e-5, target="llvm",
         return lib
 
     def check_vm_result():
-        with relay.build_config(opt_level=3, disabled_pass=["AlterOpLayout"]):
+        with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
             exe = relay.vm.compile(mod, target=target, params=params)
         code, lib = exe.save()
         lib = update_lib(lib)
@@ -63,7 +63,7 @@ def check_result(mod, map_inputs, out_shape, result, tol=1e-5, target="llvm",
         tvm.testing.assert_allclose(out.asnumpy(), result, rtol=tol, atol=tol)
 
     def check_graph_runtime_result():
-        with relay.build_config(opt_level=3, disabled_pass=["AlterOpLayout"]):
+        with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
             json, lib, param = relay.build(mod, target=target, params=params)
         lib = update_lib(lib)
         rt_mod = tvm.contrib.graph_runtime.create(json, lib, ctx)
diff --git a/tests/python/relay/test_pass_fast_math.py b/tests/python/relay/test_pass_fast_math.py
index e75316f..93ad034 100644
@@ -29,7 +29,7 @@ def test_exp():
     assert "fast_exp" in fast_mod.astext()
 
     # Check that FastMath option works for relay.build.
-    with relay.build_config(opt_level=3, required_pass=['FastMath']):
+    with tvm.transform.PassContext(opt_level=3, required_pass=['FastMath']):
         fast_mod = relay.optimize(mod, target='llvm', params=None)
     assert "fast_exp" in fast_mod[0].astext()
 
@@ -43,7 +43,7 @@ def test_tanh():
     assert "fast_tanh" in fast_mod.astext()
 
     # Check that FastMath option works for relay.build.
-    with relay.build_config(opt_level=3, required_pass=['FastMath']):
+    with tvm.transform.PassContext(opt_level=3, required_pass=['FastMath']):
         fast_mod = relay.optimize(mod, target='llvm', params=None)
     assert "fast_tanh" in fast_mod[0].astext()
 
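Conversely, required_pass forces an optional pass to run regardless of opt_level gating; FastMath (which rewrites exp/tanh into their fast approximations) is opt-in this way. A sketch mirroring the tests above, assuming the same TVM build:

import tvm
from tvm import relay

x = relay.var("x", shape=(8,), dtype="float32")
mod = tvm.IRModule.from_expr(relay.Function([x], relay.exp(x)))

with tvm.transform.PassContext(opt_level=3, required_pass=["FastMath"]):
    opt_mod, _ = relay.optimize(mod, target="llvm", params=None)
assert "fast_exp" in opt_mod.astext()
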
diff --git a/tests/python/relay/test_pass_fold_constant.py b/tests/python/relay/test_pass_fold_constant.py
index 1e8c6da..fcccab5 100644
@@ -213,7 +213,7 @@ def test_fold_batch_norm():
     mod, params = create_workload(bn_output[0], initializer)
     mod["main"] = bind_params_by_name(mod["main"], params)
 
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         mod = remove_bn_pass(mod)
 
     expect = run_infer_type(expected())
diff --git a/tests/python/relay/test_pass_manager.py b/tests/python/relay/test_pass_manager.py
index d6037b5..25299ca 100644
@@ -382,7 +382,7 @@ def test_sequential_pass():
     def test_only_module_pass():
         passes = [module_pass]
         sequential = tvm.transform.Sequential(opt_level=1, passes=passes)
-        with relay.build_config(required_pass=["mod_transform"]):
+        with tvm.transform.PassContext(required_pass=["mod_transform"]):
             ret_mod = sequential(mod)
         # Check the subtract function.
         sub_var, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
@@ -397,7 +397,7 @@ def test_sequential_pass():
         # Check the subtract function.
         passes = [function_pass]
         sequential = tvm.transform.Sequential(opt_level=1, passes=passes)
-        with relay.build_config(required_pass=["func_transform"]):
+        with tvm.transform.PassContext(required_pass=["func_transform"]):
             ret_mod = sequential(mod)
         _, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
         check_func(new_sub, get_ref_sub())
@@ -413,7 +413,7 @@ def test_sequential_pass():
         passes = [module_pass, function_pass]
         sequential = tvm.transform.Sequential(opt_level=1, passes=passes)
         required = ["mod_transform", "func_transform"]
-        with relay.build_config(required_pass=required):
+        with tvm.transform.PassContext(required_pass=required):
             ret_mod = sequential(mod)
 
         # Check the abs function is added.
@@ -490,7 +490,7 @@ def test_sequential_with_scoping():
     ])
 
     mod = tvm.IRModule({"main": before()})
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         with tvm.target.create("llvm"):
             mod = seq(mod)
 
@@ -515,7 +515,7 @@ def test_print_ir(capfd):
     ])
 
     mod = tvm.IRModule({"main": func})
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         mod = seq(mod)
 
     out = capfd.readouterr().err
@@ -549,7 +549,7 @@ def test_print_debug_callback():
     assert __TRACE_COUNTER__ == 0
     mod = tvm.IRModule({"main": func})
 
-    with relay.build_config(opt_level=3, trace=_tracer):
+    with tvm.transform.PassContext(opt_level=3, trace=_tracer):
         mod = seq(mod)
 
     assert __TRACE_COUNTER__ == 3
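
The trace hook carried over from build_config receives each pass twice, before and after it runs. A sketch of a tracer with the callback signature used in the test above:

import tvm
from tvm import relay

def my_tracer(module, pass_info, is_before):
    # invoked once before and once after every executed pass
    if is_before:
        print("running pass:", pass_info.name)

f = relay.Function([], relay.const(1) + relay.const(2))
mod = tvm.IRModule.from_expr(f)
seq = tvm.transform.Sequential([relay.transform.FoldConstant()])

with tvm.transform.PassContext(opt_level=3, trace=my_tracer):
    mod = seq(mod)
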
diff --git a/tests/python/relay/test_pass_partition_graph.py b/tests/python/relay/test_pass_partition_graph.py
index 354b616..23bf618 100644
@@ -195,7 +195,7 @@ def check_result(mod, map_inputs, out_shape, result, tol=1e-5, target="llvm",
 
     def check_vm_result():
         compile_engine.get().clear()
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             exe = relay.vm.compile(mod, target=target, params=params)
         code, lib = exe.save()
         lib = update_lib(lib)
@@ -210,7 +210,7 @@ def check_result(mod, map_inputs, out_shape, result, tol=1e-5, target="llvm",
 
     def check_graph_runtime_result():
         compile_engine.get().clear()
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             json, lib, param = relay.build(mod, target=target, params=params)
         lib = update_lib(lib)
         rt_mod = tvm.contrib.graph_runtime.create(json, lib, ctx)
@@ -512,7 +512,7 @@ def test_function_lifting():
             transform.AlterOpLayout(),
         ])
 
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             mod = opt_pass(mod)
 
         return mod
@@ -595,7 +595,7 @@ def test_function_lifting_inline():
             transform.Inline(),
         ])
 
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             mod = opt_pass(mod)
 
         return mod
@@ -885,7 +885,8 @@ def test_dnnl_fuse():
             transform.PartitionGraph()
         ])
 
-        with relay.build_config(opt_level=3, disabled_pass=["AlterOpLayout"]):
+        with tvm.transform.PassContext(opt_level=3,
+                                       disabled_pass=["AlterOpLayout"]):
             return composite_partition(mod)
 
     def test_detect_pattern(pattern_table, include_bn, include_sigmoid,
diff --git a/tests/python/relay/test_simplify_fc_transpose.py b/tests/python/relay/test_simplify_fc_transpose.py
index 537a5a2..e29038c 100644
@@ -27,7 +27,7 @@ from tvm import relay
 from tvm.relay.data_dep_optimization import simplify_fc_transpose
 
 def run_func(func, params, x):
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         graph, lib, new_params = relay.build(func, "llvm", params=params)
 
     from tvm.contrib import graph_runtime
diff --git a/tests/python/relay/test_sparse_dense_convert.py b/tests/python/relay/test_sparse_dense_convert.py
index c4f0572..e0204ae 100644
@@ -46,7 +46,7 @@ def random_bsr_matrix(M, N, BS_R, BS_C, density, dtype="float32"):
     return s
 
 def run_func(func, params, x):
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         graph, lib, new_params = relay.build(func, "llvm", params=params)
 
     from tvm.contrib import graph_runtime
diff --git a/tests/python/unittest/test_runtime_module_export.py b/tests/python/unittest/test_runtime_module_export.py
index fce7d2f..8473a67 100644
@@ -67,7 +67,7 @@ def test_mod_export():
 
         resnet18_mod, resnet18_params = relay.testing.resnet.get_workload(num_layers=18)
         resnet50_mod, resnet50_params = relay.testing.resnet.get_workload(num_layers=50)
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             _, resnet18_gpu_lib, _ = relay.build_module.build(resnet18_mod, "cuda", params=resnet18_params)
             _, resnet50_cpu_lib, _ = relay.build_module.build(resnet50_mod, "llvm", params=resnet50_params)
 
@@ -93,7 +93,7 @@ def test_mod_export():
                 return
 
         resnet18_mod, resnet18_params = relay.testing.resnet.get_workload(num_layers=18)
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             _, resnet18_cpu_lib, _ = relay.build_module.build(resnet18_mod, "llvm", params=resnet18_params)
 
         A = te.placeholder((1024,), name='A')
@@ -177,7 +177,7 @@ def test_mod_export():
                 return
 
         resnet18_mod, resnet18_params = relay.testing.resnet.get_workload(num_layers=18)
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             _, resnet18_cpu_lib, _ = relay.build_module.build(resnet18_mod, "llvm", params=resnet18_params)
 
         A = te.placeholder((1024,), name='A')
diff --git a/tests/python/unittest/test_target_codegen_blob.py b/tests/python/unittest/test_target_codegen_blob.py
index 719ddfe..7cd5793 100644
@@ -31,7 +31,7 @@ def test_resnet18():
 
     def verify(data):
         mod, params = relay.testing.resnet.get_workload(num_layers=18)
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             graph, lib, graph_params = relay.build_module.build(mod, "llvm", params=params)
         ctx = tvm.cpu()
         module = graph_runtime.create(graph, lib, ctx)
@@ -42,7 +42,7 @@ def test_resnet18():
         return out
 
     resnet18_mod, resnet18_params = relay.testing.resnet.get_workload(num_layers=18)
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         graph, resnet18_gpu_lib, graph_params = relay.build_module.build(resnet18_mod, "cuda", params=resnet18_params)
 
     from tvm.contrib import util
diff --git a/tutorials/autotvm/tune_relay_arm.py b/tutorials/autotvm/tune_relay_arm.py
index ffd3e8b..3b07097 100644
@@ -311,7 +311,7 @@ def tune_and_evaluate(tuning_opt):
     # compile kernels with history best records
     with autotvm.apply_history_best(log_file):
         print("Compile...")
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             graph, lib, params = relay.build_module.build(
                 mod, target=target, params=params)
 
diff --git a/tutorials/autotvm/tune_relay_cuda.py b/tutorials/autotvm/tune_relay_cuda.py
index 4195075..a6fe45b 100644
@@ -222,7 +222,7 @@ def tune_and_evaluate(tuning_opt):
     # compile kernels with history best records
     with autotvm.apply_history_best(log_file):
         print("Compile...")
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             graph, lib, params = relay.build_module.build(
                 mod, target=target, params=params)
 
diff --git a/tutorials/autotvm/tune_relay_mobile_gpu.py b/tutorials/autotvm/tune_relay_mobile_gpu.py
index ad74608..4748f41 100644
@@ -308,7 +308,7 @@ def tune_and_evaluate(tuning_opt):
     # compile kernels with history best records
     with autotvm.apply_history_best(log_file):
         print("Compile...")
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             graph, lib, params = relay.build_module.build(
                 mod, target=target, params=params, target_host=target_host)
         # export library
diff --git a/tutorials/autotvm/tune_relay_x86.py b/tutorials/autotvm/tune_relay_x86.py
index 15ce2de..dcc5b25 100644
@@ -189,7 +189,7 @@ def tune_and_evaluate(tuning_opt):
     # compile kernels with graph-level best records
     with autotvm.apply_graph_best(graph_opt_sch_file):
         print("Compile...")
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             graph, lib, params = relay.build_module.build(
                 mod, target=target, params=params)
 
diff --git a/tutorials/dev/relay_pass_infra.py b/tutorials/dev/relay_pass_infra.py
index 980d96c..df40733 100644
@@ -160,7 +160,7 @@ print(mod1)
 # however, provides a configuration interface
 # for users to customize the optimization level that they want to execute.
 
-with relay.build_config(opt_level=3):
+with tvm.transform.PassContext(opt_level=3):
     mod2 = seq(mod)
 print(mod2)
 
@@ -173,7 +173,7 @@ print(mod2)
 # EliminateCommonSubexpr as following. The printed module will again show two
 # identical addition operations.
 
-with relay.build_config(opt_level=3, disabled_pass=["EliminateCommonSubexpr"]):
+with tvm.transform.PassContext(opt_level=3, disabled_pass=["EliminateCommonSubexpr"]):
     mod3 = seq(mod)
 print(mod3)
 
@@ -182,12 +182,12 @@ print(mod3)
 # provides a means to make pass target-aware. For example, the layout
 # alteration pass falls in such category.
 
-with relay.build_config(opt_level=3):
+with tvm.transform.PassContext(opt_level=3):
     mod4 = seq(mod)
 print(mod4)
 
 seq1 = tvm.transform.Sequential([relay.transform.AlterOpLayout()])
-with relay.build_config(opt_level=3):
+with tvm.transform.PassContext(opt_level=3):
     with tvm.target.create("llvm"):
         mod5 = seq1(mod)
 print(mod5)
@@ -242,7 +242,7 @@ seq = tvm.transform.Sequential([relay.transform.FoldConstant(),
                                 relay.transform.EliminateCommonSubexpr(),
                                 relay.transform.FuseOps(),
                                 tvm.transform.PrintIR()])
-with relay.build_config(opt_level=3):
+with tvm.transform.PassContext(opt_level=3):
     mod = seq(mod)
 
 print("done")
diff --git a/tutorials/frontend/build_gcn.py b/tutorials/frontend/build_gcn.py
index 6ac518e..19719a5 100644
@@ -336,7 +336,7 @@ func = relay.build_module.bind_params_by_name(func, params)
 mod = tvm.IRModule()
 mod["main"] = func
 # Build with Relay
-with relay.build_config(opt_level=0): # Currently only support opt_level=0
+with tvm.transform.PassContext(opt_level=0): # Currently only support opt_level=0
     graph, lib, params = relay.build(mod, target, params=params)
 
 # Generate graph runtime
diff --git a/tutorials/frontend/deploy_model_on_android.py b/tutorials/frontend/deploy_model_on_android.py
index 17ec9cb..bc5b523 100644
@@ -263,7 +263,7 @@ input_name = 'input_1'
 shape_dict = {input_name: x.shape}
 mod, params = relay.frontend.from_keras(keras_mobilenet_v2, shape_dict)
 
-with relay.build_config(opt_level=3):
+with tvm.transform.PassContext(opt_level=3):
     graph, lib, params = relay.build(mod, target=target,
                                      target_host=target_host, params=params)
 
diff --git a/tutorials/frontend/deploy_model_on_rasp.py b/tutorials/frontend/deploy_model_on_rasp.py
index ef707fe..25df341 100644
@@ -179,7 +179,7 @@ else:
     # The above line is a simple form of
     # target = tvm.target.create('llvm -device=arm_cpu -model=bcm2837 -target=armv7l-linux-gnueabihf -mattr=+neon')
 
-with relay.build_config(opt_level=3):
+with tvm.transform.PassContext(opt_level=3):
     graph, lib, params = relay.build(func, target, params=params)
 
 # After `relay.build`, you will get three return values: graph,
diff --git a/tutorials/frontend/deploy_prequantized.py b/tutorials/frontend/deploy_prequantized.py
index 4027977..d6183d6 100644
@@ -81,7 +81,7 @@ def get_synset():
 
 
 def run_tvm_model(mod, params, input_name, inp, target="llvm"):
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         json, lib, params = relay.build(mod, target=target, params=params)
 
     runtime = tvm.contrib.graph_runtime.create(json, lib, tvm.context(target, 0))
diff --git a/tutorials/frontend/deploy_prequantized_tflite.py b/tutorials/frontend/deploy_prequantized_tflite.py
index 5fd6837..ecd283a 100644
@@ -198,7 +198,7 @@ mod, params = relay.frontend.from_tflite(tflite_model,
 # Lets now the compile the Relay module. We use the "llvm" target here. Please replace it with the
 # target platform that you are interested in.
 target = 'llvm'
-with relay.build_config(opt_level=3):
+with tvm.transform.PassContext(opt_level=3):
     graph, lib, params = relay.build_module.build(mod, target=target,
                                                   params=params)
 
diff --git a/tutorials/frontend/deploy_ssd_gluoncv.py b/tutorials/frontend/deploy_ssd_gluoncv.py
index 6126df0..e2fc3c5 100644
@@ -87,7 +87,7 @@ block = model_zoo.get_model(model_name, pretrained=True)
 
 def build(target):
     mod, params = relay.frontend.from_mxnet(block, {"data": dshape})
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         graph, lib, params = relay.build(mod, target, params=params)
     return graph, lib, params
 
diff --git a/tutorials/frontend/from_caffe2.py b/tutorials/frontend/from_caffe2.py
index 8fad80d..5988525 100644
@@ -82,13 +82,13 @@ shape_dict = {input_name: data.shape}
 dtype_dict = {input_name: data.dtype}
 
 # parse Caffe2 model and convert into Relay computation graph
-from tvm import relay
+from tvm import relay, transform
 mod, params = relay.frontend.from_caffe2(resnet50.init_net, resnet50.predict_net, shape_dict, dtype_dict)
 
 # compile the model
 # target x86 CPU
 target = 'llvm'
-with relay.build_config(opt_level=3):
+with transform.PassContext(opt_level=3):
     graph, lib, params = relay.build(mod, target, params=params)
 
 ######################################################################
diff --git a/tutorials/frontend/from_coreml.py b/tutorials/frontend/from_coreml.py
index 2a0c8db..beac483 100644
@@ -74,7 +74,7 @@ shape_dict = {'image': x.shape}
 # Parse CoreML model and convert into Relay computation graph
 mod, params = relay.frontend.from_coreml(mlmodel, shape_dict)
 
-with relay.build_config(opt_level=3):
+with tvm.transform.PassContext(opt_level=3):
     graph, lib, params = relay.build(mod,
                                      target,
                                      params=params)
diff --git a/tutorials/frontend/from_darknet.py b/tutorials/frontend/from_darknet.py
index e2c1ea5..6d84463 100644
@@ -100,7 +100,7 @@ ctx = tvm.cpu(0)
 data = np.empty([batch_size, net.c, net.h, net.w], dtype)
 shape = {'data': data.shape}
 print("Compiling the model...")
-with relay.build_config(opt_level=3):
+with tvm.transform.PassContext(opt_level=3):
     graph, lib, params = relay.build(mod,
                                      target=target,
                                      target_host=target_host,
diff --git a/tutorials/frontend/from_keras.py b/tutorials/frontend/from_keras.py
index 928a8ac..7ece790 100644
@@ -79,7 +79,7 @@ mod, params = relay.frontend.from_keras(keras_resnet50, shape_dict)
 # compile the model
 target = 'cuda'
 ctx = tvm.gpu(0)
-with relay.build_config(opt_level=3):
+with tvm.transform.PassContext(opt_level=3):
     executor = relay.build_module.create_executor('graph', mod, ctx, target)
 
 ######################################################################
diff --git a/tutorials/frontend/from_mxnet.py b/tutorials/frontend/from_mxnet.py
index d0e4c4a..6e6b2d7 100644
@@ -90,7 +90,7 @@ func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_
 ######################################################################
 # now compile the graph
 target = 'cuda'
-with relay.build_config(opt_level=3):
+with tvm.transform.PassContext(opt_level=3):
     graph, lib, params = relay.build(func, target, params=params)
 
 ######################################################################
diff --git a/tutorials/frontend/from_onnx.py b/tutorials/frontend/from_onnx.py
index 766451c..9973a08 100644
@@ -74,7 +74,7 @@ input_name = '1'
 shape_dict = {input_name: x.shape}
 mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)
 
-with relay.build_config(opt_level=1):
+with tvm.transform.PassContext(opt_level=1):
     intrp = relay.build_module.create_executor('graph', mod, tvm.cpu(0), target)
 
 ######################################################################
diff --git a/tutorials/frontend/from_pytorch.py b/tutorials/frontend/from_pytorch.py
index 8354b0e..53d29a9 100644
@@ -101,7 +101,7 @@ mod, params = relay.frontend.from_pytorch(scripted_model,
 target = 'llvm'
 target_host = 'llvm'
 ctx = tvm.cpu(0)
-with relay.build_config(opt_level=3):
+with tvm.transform.PassContext(opt_level=3):
     graph, lib, params = relay.build(mod,
                                      target=target,
                                      target_host=target_host,
diff --git a/tutorials/frontend/from_tensorflow.py b/tutorials/frontend/from_tensorflow.py
index 0ebd733..b7b3d69 100644
@@ -144,7 +144,7 @@ print("Tensorflow protobuf imported to relay frontend.")
 #   params: final params after compilation.
 #   lib: target library which can be deployed on target with TVM runtime.
 
-with relay.build_config(opt_level=3):
+with tvm.transform.PassContext(opt_level=3):
     graph, lib, params = relay.build(mod,
                                      target=target,
                                      target_host=target_host,
diff --git a/tutorials/frontend/from_tflite.py b/tutorials/frontend/from_tflite.py
index e01a4ec..35a308c 100644
@@ -128,14 +128,14 @@ input_shape = (1, 224, 224, 3)
 input_dtype = "float32"
 
 # Parse TFLite model and convert it to a Relay module
-from tvm import relay
+from tvm import relay, transform
 mod, params = relay.frontend.from_tflite(tflite_model,
                                          shape_dict={input_tensor: input_shape},
                                          dtype_dict={input_tensor: input_dtype})
 
 # Build the module against to x86 CPU
 target = "llvm"
-with relay.build_config(opt_level=3):
+with transform.PassContext(opt_level=3):
     graph, lib, params = relay.build(mod, target, params=params)
 
 ######################################################################
diff --git a/tutorials/relay_quick_start.py b/tutorials/relay_quick_start.py
index b2174a0..e52a99a 100644
@@ -96,7 +96,7 @@ print(mod.astext(show_meta_data=False))
 
 opt_level = 3
 target = tvm.target.cuda()
-with relay.build_config(opt_level=opt_level):
+with tvm.transform.PassContext(opt_level=opt_level):
     graph, lib, params = relay.build(mod, target, params=params)
 
 #####################################################################
diff --git a/vta/scripts/tune_resnet.py b/vta/scripts/tune_resnet.py
index 26c240e..2d358d3 100644
@@ -127,7 +127,7 @@ def compile_network(opt, env, target):
 
     # Perform quantization in Relay
     # Note: We set opt_level to 3 in order to fold batch norm
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         with relay.quantize.qconfig(global_scale=8.0,
                                     skip_conv_layers=[0]):
             relay_prog = relay.quantize.quantize(mod["main"], params=params)
@@ -272,7 +272,7 @@ if __name__ == '__main__':
         # Compile network
         print("Compiling network with best tuning parameters...")
         if target.device_name != "vta":
-            with relay.build_config(opt_level=3, disabled_pass={"AlterOpLayout"}):
+            with tvm.transform.PassContext(opt_level=3, disabled_pass={"AlterOpLayout"}):
                 graph, lib, params = relay.build(
                     relay_prog, target=target,
                     params=params, target_host=env.target_host)
diff --git a/vta/tutorials/autotvm/tune_relay_vta.py b/vta/tutorials/autotvm/tune_relay_vta.py
index 63106a5..a92b1ee 100644
@@ -92,7 +92,7 @@ def compile_network(env, target, model, start_pack, stop_pack):
 
     # Perform quantization in Relay
     # Note: We set opt_level to 3 in order to fold batch norm
-    with relay.build_config(opt_level=3):
+    with tvm.transform.PassContext(opt_level=3):
         with relay.quantize.qconfig(global_scale=8.0, skip_conv_layers=[0]):
             mod = relay.quantize.quantize(mod, params=params)
 
diff --git a/vta/tutorials/frontend/deploy_classification.py b/vta/tutorials/frontend/deploy_classification.py
index 7ca4b98..3a36785 100644
@@ -171,7 +171,7 @@ with autotvm.tophub.context(target):
     if target.device_name == "vta":
         # Perform quantization in Relay
         # Note: We set opt_level to 3 in order to fold batch norm
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             with relay.quantize.qconfig(global_scale=8.0,
                                         skip_conv_layers=[0]):
                 mod = relay.quantize.quantize(mod, params=params)
diff --git a/vta/tutorials/frontend/deploy_detection.py b/vta/tutorials/frontend/deploy_detection.py
index efcd2c4..5039488 100644
@@ -207,7 +207,7 @@ with autotvm.tophub.context(target):
     if target.device_name == "vta":
     # Perform quantization in Relay
     # Note: We set opt_level to 3 in order to fold batch norm
-        with relay.build_config(opt_level=3):
+        with tvm.transform.PassContext(opt_level=3):
             with relay.quantize.qconfig(global_scale=33.0,
                                         skip_conv_layers=[0],
                                         store_lowbit_output=True,