From c55ed371693740291b82cc8d88bf09c830d029c7 Mon Sep 17 00:00:00 2001 From: Zhi <5145158+zhiics@users.noreply.github.com> Date: Fri, 29 May 2020 21:59:35 -0700 Subject: [PATCH] [REFACTOR][RELAY] Replace build_config with PassContext (#5698) --- apps/android_camera/models/prepare_model.py | 2 +- apps/benchmark/arm_cpu_imagenet_bench.py | 2 +- apps/benchmark/gpu_imagenet_bench.py | 2 +- apps/benchmark/mobile_gpu_imagenet_bench.py | 2 +- apps/bundle_deploy/build_model.py | 2 +- apps/sgx/src/build_model.py | 2 +- golang/sample/gen_mobilenet_lib.py | 4 ++-- python/tvm/relay/frontend/common.py | 2 +- python/tvm/relay/quantize/_calibrate.py | 3 +-- python/tvm/relay/transform/transform.py | 10 +++++++--- rust/frontend/examples/resnet/src/build_resnet.py | 4 ++-- src/relay/backend/build_module.cc | 5 ++--- tests/cpp/relay_transform_sequential.cc | 2 +- tests/python/frontend/caffe2/test_forward.py | 2 +- tests/python/frontend/coreml/test_forward.py | 4 ++-- tests/python/frontend/keras/test_forward.py | 2 +- tests/python/frontend/mxnet/test_forward.py | 2 +- tests/python/frontend/mxnet/test_qnn_ops_utils.py | 6 +++--- tests/python/frontend/onnx/test_forward.py | 2 +- tests/python/frontend/pytorch/qnn_test.py | 2 +- tests/python/frontend/pytorch/test_forward.py | 4 ++-- tests/python/frontend/tensorflow/test_bn_dynamic.py | 2 +- tests/python/frontend/tensorflow/test_forward.py | 4 ++-- tests/python/frontend/tflite/test_forward.py | 2 +- .../nightly/quantization/test_quantization_accuracy.py | 4 ++-- tests/python/relay/benchmarking/benchmark_vm.py | 4 ++-- tests/python/relay/test_backend_compile_engine.py | 2 +- tests/python/relay/test_backend_graph_runtime.py | 2 +- tests/python/relay/test_cpp_build_module.py | 2 +- tests/python/relay/test_external_codegen.py | 6 ++++-- tests/python/relay/test_memory_passes.py | 2 +- tests/python/relay/test_op_fast_math.py | 2 +- tests/python/relay/test_op_level2.py | 10 +++++----- tests/python/relay/test_op_qnn_conv2d.py | 10 +++++----- tests/python/relay/test_op_qnn_dense.py | 2 +- tests/python/relay/test_op_qnn_dequantize.py | 2 +- tests/python/relay/test_op_qnn_quantize.py | 2 +- tests/python/relay/test_op_qnn_requantize.py | 2 +- tests/python/relay/test_pass_annotate_target.py | 4 ++-- tests/python/relay/test_pass_fast_math.py | 4 ++-- tests/python/relay/test_pass_fold_constant.py | 2 +- tests/python/relay/test_pass_manager.py | 12 ++++++------ tests/python/relay/test_pass_partition_graph.py | 11 ++++++----- tests/python/relay/test_simplify_fc_transpose.py | 2 +- tests/python/relay/test_sparse_dense_convert.py | 2 +- tests/python/unittest/test_runtime_module_export.py | 6 +++--- tests/python/unittest/test_target_codegen_blob.py | 4 ++-- tutorials/autotvm/tune_relay_arm.py | 2 +- tutorials/autotvm/tune_relay_cuda.py | 2 +- tutorials/autotvm/tune_relay_mobile_gpu.py | 2 +- tutorials/autotvm/tune_relay_x86.py | 2 +- tutorials/dev/relay_pass_infra.py | 10 +++++----- tutorials/frontend/build_gcn.py | 2 +- tutorials/frontend/deploy_model_on_android.py | 2 +- tutorials/frontend/deploy_model_on_rasp.py | 2 +- tutorials/frontend/deploy_prequantized.py | 2 +- tutorials/frontend/deploy_prequantized_tflite.py | 2 +- tutorials/frontend/deploy_ssd_gluoncv.py | 2 +- tutorials/frontend/from_caffe2.py | 4 ++-- tutorials/frontend/from_coreml.py | 2 +- tutorials/frontend/from_darknet.py | 2 +- tutorials/frontend/from_keras.py | 2 +- tutorials/frontend/from_mxnet.py | 2 +- tutorials/frontend/from_onnx.py | 2 +- tutorials/frontend/from_pytorch.py | 2 +- 
tutorials/frontend/from_tensorflow.py | 2 +- tutorials/frontend/from_tflite.py | 4 ++-- tutorials/relay_quick_start.py | 2 +- vta/scripts/tune_resnet.py | 4 ++-- vta/tutorials/autotvm/tune_relay_vta.py | 2 +- vta/tutorials/frontend/deploy_classification.py | 2 +- vta/tutorials/frontend/deploy_detection.py | 2 +- 72 files changed, 121 insertions(+), 116 deletions(-) diff --git a/apps/android_camera/models/prepare_model.py b/apps/android_camera/models/prepare_model.py index 36674d2..703a465 100644 --- a/apps/android_camera/models/prepare_model.py +++ b/apps/android_camera/models/prepare_model.py @@ -87,7 +87,7 @@ def main(model_str, output_path): except FileExistsError: pass print("building...") - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(net, target, target_host=target_host, params=params) print("dumping lib...") lib.export_library(output_path_str + '/' + 'deploy_lib_cpu.so', ndk.create_shared) diff --git a/apps/benchmark/arm_cpu_imagenet_bench.py b/apps/benchmark/arm_cpu_imagenet_bench.py index 53b6168..f319d5a 100644 --- a/apps/benchmark/arm_cpu_imagenet_bench.py +++ b/apps/benchmark/arm_cpu_imagenet_bench.py @@ -39,7 +39,7 @@ def evaluate_network(network, target, target_host, repeat): net, params, input_shape, output_shape = get_network(network, batch_size=1) print_progress("%-20s building..." % network) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build( net, target=target, target_host=target_host, params=params) diff --git a/apps/benchmark/gpu_imagenet_bench.py b/apps/benchmark/gpu_imagenet_bench.py index 0023700..a3df2c4 100644 --- a/apps/benchmark/gpu_imagenet_bench.py +++ b/apps/benchmark/gpu_imagenet_bench.py @@ -33,7 +33,7 @@ from util import get_network def benchmark(network, target): net, params, input_shape, output_shape = get_network(network, batch_size=1) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(net, target=target, params=params) # create runtime diff --git a/apps/benchmark/mobile_gpu_imagenet_bench.py b/apps/benchmark/mobile_gpu_imagenet_bench.py index 4f93a0d..83127ff 100644 --- a/apps/benchmark/mobile_gpu_imagenet_bench.py +++ b/apps/benchmark/mobile_gpu_imagenet_bench.py @@ -38,7 +38,7 @@ def evaluate_network(network, target, target_host, dtype, repeat): net, params, input_shape, output_shape = get_network(network, batch_size=1, dtype=dtype) print_progress("%-20s building..." 
% network) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build( net, target=target, target_host=target_host, params=params) diff --git a/apps/bundle_deploy/build_model.py b/apps/bundle_deploy/build_model.py index 63d658e..1d415cd 100644 --- a/apps/bundle_deploy/build_model.py +++ b/apps/bundle_deploy/build_model.py @@ -33,7 +33,7 @@ def build_module(opts): func = mod["main"] func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build( func, 'llvm --system-lib', params=params) diff --git a/apps/sgx/src/build_model.py b/apps/sgx/src/build_model.py index 6e0933e..b988574 100755 --- a/apps/sgx/src/build_model.py +++ b/apps/sgx/src/build_model.py @@ -37,7 +37,7 @@ def main(): net, params = relay.testing.resnet.get_workload( layers=18, batch_size=dshape[0], image_shape=dshape[1:]) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build( net, 'llvm --system-lib', params=params) diff --git a/golang/sample/gen_mobilenet_lib.py b/golang/sample/gen_mobilenet_lib.py index 8becd07..d4dcf21 100644 --- a/golang/sample/gen_mobilenet_lib.py +++ b/golang/sample/gen_mobilenet_lib.py @@ -16,7 +16,7 @@ # under the License. import os -from tvm import relay +from tvm import relay, transform from tvm.contrib.download import download_testdata @@ -77,7 +77,7 @@ mod, params = relay.frontend.from_tflite(tflite_model, target = 'llvm' # Build with Relay -with relay.build_config(opt_level=3): +with transform.PassContext(opt_level=3): graph, lib, params = relay.build_module.build( mod, target, params=params) diff --git a/python/tvm/relay/frontend/common.py b/python/tvm/relay/frontend/common.py index e86890f..05222c6 100644 --- a/python/tvm/relay/frontend/common.py +++ b/python/tvm/relay/frontend/common.py @@ -505,7 +505,7 @@ def infer_value(input_val, params, mod=None): assert all(var.name_hint in params.keys() for var in analysis.free_vars( input_val)), "All inputs to infer must be available in params." func = _function.Function(analysis.free_vars(input_val), input_val) - with tvm.relay.build_config(opt_level=0): + with tvm.transform.PassContext(opt_level=0): graph, lib, params = tvm.relay.build(func, target="llvm", params=params) ctx = tvm.cpu(0) m = graph_runtime.create(graph, lib, ctx) diff --git a/python/tvm/relay/quantize/_calibrate.py b/python/tvm/relay/quantize/_calibrate.py index 9794698..59ee51b 100644 --- a/python/tvm/relay/quantize/_calibrate.py +++ b/python/tvm/relay/quantize/_calibrate.py @@ -28,7 +28,6 @@ from . import quantize from .. import op as _op from .. import expr as _expr from .. import analysis as _analysis -from .. import transform as _transform from .. 
import build_module as _build_module from ...contrib import graph_runtime from .kl_divergence import _find_scale_by_kl @@ -45,7 +44,7 @@ def _get_profile_runtime(mod): target = 'llvm' ctx = tvm.context(target) - with _transform.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = _build_module.build(func, target=target) runtime = graph_runtime.create(graph, lib, ctx) runtime.set_input(**params) diff --git a/python/tvm/relay/transform/transform.py b/python/tvm/relay/transform/transform.py index 19ddb32..8f4ec10 100644 --- a/python/tvm/relay/transform/transform.py +++ b/python/tvm/relay/transform/transform.py @@ -21,6 +21,7 @@ Relay pass transformation infrastructure. import types import inspect import functools +import warnings import tvm.ir from tvm import te @@ -34,7 +35,9 @@ def build_config(opt_level=2, required_pass=None, disabled_pass=None, trace=None): - """Configure the build behavior by setting config variables. + """Configure the build behavior by setting config variables. This function + is deprecated in TVM v0.7. Use + tvm.transform.PassContext directly instead. Parameters ---------- @@ -72,8 +75,9 @@ def build_config(opt_level=2, pass_context: PassContext The pass context for optimizations. """ - return tvm.ir.transform.PassContext(opt_level, required_pass, - disabled_pass, trace) + warnings.warn("relay.build_config will be deprecated. Please use " + "tvm.transform.PassContext directly", DeprecationWarning) + return tvm.transform.PassContext(opt_level, required_pass, disabled_pass, trace) @tvm._ffi.register_object("relay.FunctionPass") diff --git a/rust/frontend/examples/resnet/src/build_resnet.py b/rust/frontend/examples/resnet/src/build_resnet.py index 49c67bf..a09a0c3 100644 --- a/rust/frontend/examples/resnet/src/build_resnet.py +++ b/rust/frontend/examples/resnet/src/build_resnet.py @@ -75,8 +75,8 @@ def build(target_dir): num_layers=18, batch_size=batch_size, image_shape=image_shape) # compile the model - with relay.build_config(opt_level=opt_level): - graph, lib, params = relay.build_module.build(net, target, params=params) + with tvm.transform.PassContext(opt_level=opt_level): + graph, lib, params = relay.build_module.build(net, target, params=params) # save the model artifacts lib.save(deploy_lib) diff --git a/src/relay/backend/build_module.cc b/src/relay/backend/build_module.cc index abce068..7aad766 100644 --- a/src/relay/backend/build_module.cc +++ b/src/relay/backend/build_module.cc @@ -304,9 +304,8 @@ class RelayBuildModule : public runtime::ModuleNode { // Handle heterogeneous compilation.
transform::PassContext pass_ctx = PassContext::Current(); if (targets_.size() > 1) { - Optional<IntImm> opt_fallback_dev = - pass_ctx->GetConfig("relay.fallback_device_type", - IntImm(runtime::DataType::Int(32), static_cast<int>(kDLCPU))); + Optional<Integer> opt_fallback_dev = + pass_ctx->GetConfig("relay.fallback_device_type", Integer(static_cast<int>(kDLCPU))); auto fallback_dev = opt_fallback_dev.value(); CHECK_GT(fallback_dev->value, 0U); relay_module = RunDeviceAnnotationPass(relay_module, fallback_dev->value); diff --git a/tests/cpp/relay_transform_sequential.cc b/tests/cpp/relay_transform_sequential.cc index 60d3a5e..f08d557 100644 --- a/tests/cpp/relay_transform_sequential.cc +++ b/tests/cpp/relay_transform_sequential.cc @@ -70,7 +70,7 @@ TEST(Relay, Sequential) { auto mod = IRModule::FromExpr(func); auto pass_ctx = relay::transform::PassContext::Create(); pass_ctx->opt_level = 3; - pass_ctx->config.Set("relay.fallback_device_type", IntImm(DataType::Int(32), 1)); + pass_ctx->config.Set("relay.fallback_device_type", Integer(1)); { tvm::With<relay::transform::PassContext> ctx_scope(pass_ctx); tvm::With<tvm::Target> tctx(tvm::Target::Create("llvm")); diff --git a/tests/python/frontend/caffe2/test_forward.py b/tests/python/frontend/caffe2/test_forward.py index f052872..50a8781 100644 --- a/tests/python/frontend/caffe2/test_forward.py +++ b/tests/python/frontend/caffe2/test_forward.py @@ -43,7 +43,7 @@ def get_tvm_output(model, dtype_dict = {input_names: input_data.dtype} mod, params = relay.frontend.from_caffe2( model.init_net, model.predict_net, shape_dict, dtype_dict) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target, params=params) m = graph_runtime.create(graph, lib, ctx) diff --git a/tests/python/frontend/coreml/test_forward.py b/tests/python/frontend/coreml/test_forward.py index 3a15638..179f5b4 100644 --- a/tests/python/frontend/coreml/test_forward.py +++ b/tests/python/frontend/coreml/test_forward.py @@ -33,7 +33,7 @@ import model_zoo def get_tvm_output(func, x, params, target, ctx, out_shape=(1, 1000), input_name='image', dtype='float32'): - with relay.transform.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(func, target, params=params) m = graph_runtime.create(graph, lib, ctx) # set inputs @@ -76,7 +76,7 @@ def run_tvm_graph(coreml_model, target, ctx, input_data, input_name, output_shap dtype_dict = {input_name: input_data.dtype} mod, params = relay.frontend.from_coreml(coreml_model, shape_dict) - with relay.transform.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target, params=params) from tvm.contrib import graph_runtime diff --git a/tests/python/frontend/keras/test_forward.py b/tests/python/frontend/keras/test_forward.py index ed0181f..9b963c3 100644 --- a/tests/python/frontend/keras/test_forward.py +++ b/tests/python/frontend/keras/test_forward.py @@ -84,7 +84,7 @@ def verify_keras_frontend(keras_model, need_transpose=True, layout='NCHW'): def get_tvm_output(xs, target, ctx, dtype='float32'): shape_dict = {name: x.shape for (name, x) in zip(keras_model.input_names, xs)} mod, params = relay.frontend.from_keras(keras_model, shape_dict, layout=layout) - with relay.transform.build_config(opt_level=2): + with tvm.transform.PassContext(opt_level=2): graph, lib, params = relay.build(mod, target, params=params) diff --git a/tests/python/frontend/mxnet/test_forward.py b/tests/python/frontend/mxnet/test_forward.py index 6d36ea3..5ed2fb8 100644
--- a/tests/python/frontend/mxnet/test_forward.py +++ b/tests/python/frontend/mxnet/test_forward.py @@ -66,7 +66,7 @@ def verify_mxnet_frontend_impl(mx_symbol, shape_dict, arg_params=args, aux_params=auxs) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target, params=params) m = graph_runtime.create(graph, lib, ctx) # set inputs diff --git a/tests/python/frontend/mxnet/test_qnn_ops_utils.py b/tests/python/frontend/mxnet/test_qnn_ops_utils.py index d130eef..541162d 100644 --- a/tests/python/frontend/mxnet/test_qnn_ops_utils.py +++ b/tests/python/frontend/mxnet/test_qnn_ops_utils.py @@ -15,8 +15,8 @@ # specific language governing permissions and limitations # under the License. -import tvm import numpy as np +import tvm from tvm import relay from tvm.contrib import graph_runtime from tvm.relay.frontend.mxnet_qnn_op_utils import dequantize_mxnet_min_max, \ @@ -39,7 +39,7 @@ def test_mkldnn_dequantize(): in_dtype=in_dtype) mod = relay.Function(relay.analysis.free_vars(dequantized_output), dequantized_output) mod = tvm.IRModule.from_expr(mod) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, "llvm", params=None) rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) rt_mod.set_input(input_data=in_data) @@ -93,7 +93,7 @@ def test_mkldnn_quantize(): out_dtype=out_dtype) mod = relay.Function(relay.analysis.free_vars(quantized_output), quantized_output) mod = tvm.IRModule.from_expr(mod) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, "llvm", params=None) rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) rt_mod.set_input(input_data=in_data) diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index cd01294..edc33b7 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -65,7 +65,7 @@ def get_tvm_output(graph_def, input_data, target, ctx, output_shape=None, output mod, params = relay.frontend.from_onnx(graph_def, shape_dict, opset=opset) - with relay.build_config(opt_level=1): + with tvm.transform.PassContext(opt_level=1): graph, lib, params = relay.build(mod, target, params=params) diff --git a/tests/python/frontend/pytorch/qnn_test.py b/tests/python/frontend/pytorch/qnn_test.py index bf5fa98..551cdc4 100644 --- a/tests/python/frontend/pytorch/qnn_test.py +++ b/tests/python/frontend/pytorch/qnn_test.py @@ -41,7 +41,7 @@ def get_tvm_runtime(script_module, input_name, ishape): input_shapes = [(input_name, ishape)] mod, params = relay.frontend.from_pytorch(script_module, input_shapes) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): # test on only cpu for now, torch cannot run quant models on cuda # also not to make CI too slow json, lib, params = relay.build(mod, target="llvm", params=params) diff --git a/tests/python/frontend/pytorch/test_forward.py b/tests/python/frontend/pytorch/test_forward.py index 6159bb8..f6edbf1 100644 --- a/tests/python/frontend/pytorch/test_forward.py +++ b/tests/python/frontend/pytorch/test_forward.py @@ -176,7 +176,7 @@ def verify_model(model_name, input_data=[], compiled_input = dict(zip(input_names, [inp.cpu().numpy() for inp in baseline_input])) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): for target, ctx in ctx_list: relay_graph, relay_lib, relay_params = 
relay.build(mod, target=target, params=params) relay_model = graph_runtime.create(relay_graph, relay_lib, ctx) @@ -2294,7 +2294,7 @@ def test_forward_pretrained_bert_base_uncased(): # ---------------------------- target = 'llvm' - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params) ###################################################################### diff --git a/tests/python/frontend/tensorflow/test_bn_dynamic.py b/tests/python/frontend/tensorflow/test_bn_dynamic.py index a2d6903..e80d774 100644 --- a/tests/python/frontend/tensorflow/test_bn_dynamic.py +++ b/tests/python/frontend/tensorflow/test_bn_dynamic.py @@ -50,7 +50,7 @@ def verify_fused_batch_norm(shape): continue mod, params = relay.frontend.from_tensorflow(constant_graph, outputs=['output']) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target=device, params=params) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index c6a285c..89a0335 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -123,7 +123,7 @@ def run_tvm_graph(graph_def, input_data, input_node, num_output=1, result = ex.evaluate()(*inputs) return vmobj_to_list(result) else: - with relay.build_config(opt_level=opt_level): + with tvm.transform.PassContext(opt_level=opt_level): graph, lib, params = relay.build(mod, target, target_host, params) ctx = tvm.context(target, 0) @@ -2307,7 +2307,7 @@ def test_forward_ptb(): 'Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_c': 'float32', 'Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_h': 'float32'} target = 'llvm' - with relay.build_config(opt_level=0): + with tvm.transform.PassContext(opt_level=0): graph, lib, params = relay.build(mod, target, params=params) diff --git a/tests/python/frontend/tflite/test_forward.py b/tests/python/frontend/tflite/test_forward.py index a68fd90..24b82c6 100644 --- a/tests/python/frontend/tflite/test_forward.py +++ b/tests/python/frontend/tflite/test_forward.py @@ -109,7 +109,7 @@ def run_tvm_graph(tflite_model_buf, input_data, input_node, num_output=1, target shape_dict=shape_dict, dtype_dict=dtype_dict) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target, params=params) ctx = tvm.context(target, 0) diff --git a/tests/python/nightly/quantization/test_quantization_accuracy.py b/tests/python/nightly/quantization/test_quantization_accuracy.py index 4818cc6..d4b55f1 100644 --- a/tests/python/nightly/quantization/test_quantization_accuracy.py +++ b/tests/python/nightly/quantization/test_quantization_accuracy.py @@ -66,7 +66,7 @@ def get_model(model_name, batch_size, qconfig, target=None, original=False, simu mod, params = relay.frontend.from_mxnet(gluon_model, {"data": data_shape}) net = mod['main'] - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): qfunc = relay.quantize.prerequisite_optimize(net, params=params) logging.debug('original') logging.debug(qfunc.astext(show_meta_data=False)) @@ -83,7 +83,7 @@ def get_model(model_name, batch_size, qconfig, target=None, original=False, simu def eval_acc(model, dataset, batch_fn, target=tvm.target.cuda(), ctx=tvm.gpu(), log_interval=100): - with relay.build_config(opt_level=3): + with 
tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(model, target) # create runtime module m = tvm.contrib.graph_runtime.create(graph, lib, ctx) diff --git a/tests/python/relay/benchmarking/benchmark_vm.py b/tests/python/relay/benchmarking/benchmark_vm.py index 1e9030c..a6e05be 100644 --- a/tests/python/relay/benchmarking/benchmark_vm.py +++ b/tests/python/relay/benchmarking/benchmark_vm.py @@ -36,7 +36,7 @@ def benchmark_execution(mod, model="unknown"): def get_graph_runtime_output(mod, data, params, target, ctx, dtype='float32', number=2, repeat=20): - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target, params=params) m = graph_runtime.create(graph, lib, ctx) @@ -59,7 +59,7 @@ def benchmark_execution(mod, def get_vm_output(mod, data, params, target, ctx, dtype='float32', number=2, repeat=20): - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): exe = vm.compile(mod, target, params=params) rly_vm = vm_rt.VirtualMachine(exe) rly_vm.init(ctx) diff --git a/tests/python/relay/test_backend_compile_engine.py b/tests/python/relay/test_backend_compile_engine.py index eb018fe..1b4e08f 100644 --- a/tests/python/relay/test_backend_compile_engine.py +++ b/tests/python/relay/test_backend_compile_engine.py @@ -184,7 +184,7 @@ def test_compile_placeholder_bypass(): z = relay.var("z", shape=(2, 3)) result = relay.Tuple([x, relay.op.concatenate([y, z], axis=0)]) func = relay.Function(relay.analysis.free_vars(result), result) - with relay.build_config(opt_level=0): + with tvm.transform.PassContext(opt_level=0): graph, lib, params = relay.build(tvm.IRModule.from_expr(func), 'llvm') diff --git a/tests/python/relay/test_backend_graph_runtime.py b/tests/python/relay/test_backend_graph_runtime.py index 226d5ba..f0785bc 100644 --- a/tests/python/relay/test_backend_graph_runtime.py +++ b/tests/python/relay/test_backend_graph_runtime.py @@ -166,7 +166,7 @@ def test_gru_like(): z = unit(rnn_dim) for target, ctx in ctx_list(): - with relay.build_config(opt_level=2): + with tvm.transform.PassContext(opt_level=2): graph, lib, params = relay.build(tvm.IRModule.from_expr(z), target) m = graph_runtime.create(graph, lib, ctx) m.set_input("X", tvm.nd.array(x.astype(dtype))) diff --git a/tests/python/relay/test_cpp_build_module.py b/tests/python/relay/test_cpp_build_module.py index 171b6b0..8d54384 100644 --- a/tests/python/relay/test_cpp_build_module.py +++ b/tests/python/relay/test_cpp_build_module.py @@ -115,7 +115,7 @@ def test_fp16_conversion(): X = tvm.nd.array(n * np.random.randn(n).astype(src) - n / 2) # build - with relay.build_config(opt_level=1): + with tvm.transform.PassContext(opt_level=1): g_json, mmod, params = relay.build(tvm.IRModule.from_expr(func), tgt) # test diff --git a/tests/python/relay/test_external_codegen.py b/tests/python/relay/test_external_codegen.py index 3797910..c449ce3 100644 --- a/tests/python/relay/test_external_codegen.py +++ b/tests/python/relay/test_external_codegen.py @@ -49,7 +49,8 @@ def check_result(mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", return lib def check_vm_result(): - with relay.build_config(opt_level=3, disabled_pass=["AlterOpLayout"]): + with tvm.transform.PassContext(opt_level=3, + disabled_pass=["AlterOpLayout"]): exe = relay.vm.compile(mod, target=target) code, lib = exe.save() lib = update_lib(lib) @@ -60,7 +61,8 @@ def check_result(mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", 
tvm.testing.assert_allclose(out.asnumpy(), result, rtol=tol, atol=tol) def check_graph_runtime_result(): - with relay.build_config(opt_level=3, disabled_pass=["AlterOpLayout"]): + with tvm.transform.PassContext(opt_level=3, + disabled_pass=["AlterOpLayout"]): json, lib, _ = relay.build(mod, target=target) lib = update_lib(lib) rt_mod = tvm.contrib.graph_runtime.create(json, lib, ctx) diff --git a/tests/python/relay/test_memory_passes.py b/tests/python/relay/test_memory_passes.py index 70e7086..dc16865 100644 --- a/tests/python/relay/test_memory_passes.py +++ b/tests/python/relay/test_memory_passes.py @@ -37,7 +37,7 @@ def check_memory_plan(func, check_fn): no_plan_result = ex.evaluate(mod['main'])(*args) # Compute with memory planning. - with relay.build_config(opt_level=1, disabled_pass=["MemoryPlan"]): + with tvm.transform.PassContext(opt_level=1, disabled_pass=["MemoryPlan"]): plan_result = ex.evaluate(mod['main'])(*args) # Compute Python result. diff --git a/tests/python/relay/test_op_fast_math.py b/tests/python/relay/test_op_fast_math.py index 215b83e..a771d29 100644 --- a/tests/python/relay/test_op_fast_math.py +++ b/tests/python/relay/test_op_fast_math.py @@ -34,7 +34,7 @@ def test_fastmath(): func = relay.Function([x], y) mod = tvm.IRModule.from_expr(func) - with relay.build_config(opt_level=3, required_pass=['FastMath']): + with tvm.transform.PassContext(opt_level=3, required_pass=['FastMath']): graph, lib, params = relay.build(mod, target="llvm", params=None) # Check that the ops related to fast math have been converted to functions in lib diff --git a/tests/python/relay/test_op_level2.py b/tests/python/relay/test_op_level2.py index 68eced3..3e8720d 100644 --- a/tests/python/relay/test_op_level2.py +++ b/tests/python/relay/test_op_level2.py @@ -262,7 +262,7 @@ def test_conv2d_run(): with open(temp.relpath("temp.log"), "w") as log_file: log_file.write(test_schedule) with autotvm.apply_history_best(temp.relpath("temp.log")): - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): print('Compiling...') graph_json, mod, params = tvm.relay.build(mod, target="llvm -device=arm_cpu") @@ -356,7 +356,7 @@ def test_conv2d_winograd(): data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, groups=groups) - with WinogradFallback(), relay.build_config(opt_level=3): + with WinogradFallback(), tvm.transform.PassContext(opt_level=3): for target, ctx in ctx_list(): if target != 'cuda': continue @@ -578,7 +578,7 @@ def test_conv3d_winograd(): data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, groups=groups) - with WinogradFallback(), relay.build_config(opt_level=3): + with WinogradFallback(), tvm.transform.PassContext(opt_level=3): for target, ctx in ctx_list(): if target != 'cuda': continue @@ -1199,7 +1199,7 @@ def test_conv2d_int8_intrinsics(): wdata = np.random.rand(*kernel_shape) * 10 parameters = {"weight": tvm.nd.array(wdata.astype(weight_dtype))} - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(func, target, params=parameters) assembly = lib.get_source("asm") @@ -1314,7 +1314,7 @@ def test_depthwise_conv2d_int8(): llvm_version = tvm.target.codegen.llvm_version_major() for target in targets: if llvm_version >= 8: - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(func, target, params=parameters) diff --git a/tests/python/relay/test_op_qnn_conv2d.py b/tests/python/relay/test_op_qnn_conv2d.py index
6911c52..fcb335f 100644 --- a/tests/python/relay/test_op_qnn_conv2d.py +++ b/tests/python/relay/test_op_qnn_conv2d.py @@ -182,7 +182,7 @@ def verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, def get_output(func, golden_inputs): - with relay.build_config(opt_level=2): + with tvm.transform.PassContext(opt_level=2): golden_data, golden_weight = golden_inputs params = {'kernel': golden_weight} graph, lib, params = relay.build(func, "llvm", params=params) @@ -655,7 +655,7 @@ def test_tflite_large_irregular(): golden_data = np.full(data_shape, 127).astype('uint8') golden_weight = np.full(kernel_shape, 127).astype('uint8') - with relay.build_config(opt_level=2): + with tvm.transform.PassContext(opt_level=2): params = {'kernel': golden_weight} graph, lib, params = relay.build(qnn_func, "llvm", params=params) mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) @@ -698,7 +698,7 @@ def test_tflite_output_multiplier_greater_than_one(): -1, -1, 1, 1)).reshape(kernel_shape) golden_weight = golden_weight.astype('uint8') - with relay.build_config(opt_level=2): + with tvm.transform.PassContext(opt_level=2): params = {'kernel': golden_weight} graph, lib, params = relay.build(qnn_func, "llvm", params=params) mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) @@ -744,7 +744,7 @@ def test_tflite_anistropic_strides(): golden_weight = np.array((129, 131, 133, 135)).reshape(kernel_shape) golden_weight = golden_weight.astype('uint8') - with relay.build_config(opt_level=2): + with tvm.transform.PassContext(opt_level=2): params = {'kernel': golden_weight} graph, lib, params = relay.build(qnn_func, "llvm", params=params) mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) @@ -789,7 +789,7 @@ def test_broadcast_layout(): func = relay.add(func, bias) func = relay.Function(relay.analysis.free_vars(func), func) mod = tvm.IRModule.from_expr(func) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, "llvm -mcpu=skylake-avx512") def test_depthwise_depth_multiplier(): diff --git a/tests/python/relay/test_op_qnn_dense.py b/tests/python/relay/test_op_qnn_dense.py index 3cfcfd1..0ba3210 100644 --- a/tests/python/relay/test_op_qnn_dense.py +++ b/tests/python/relay/test_op_qnn_dense.py @@ -167,7 +167,7 @@ def qnn_dense_driver(test_configuration): mod = relay.Function(relay.analysis.free_vars(mod), mod) mod = tvm.IRModule.from_expr(mod) mod = relay.qnn.transform.CanonicalizeOps()(mod) - with relay.build_config(opt_level=2): + with tvm.transform.PassContext(opt_level=2): graph, lib, params = relay.build(mod, "llvm", params=None) mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) mod.set_input(quantized_data_name, test_configuration[quantized_data_name]) diff --git a/tests/python/relay/test_op_qnn_dequantize.py b/tests/python/relay/test_op_qnn_dequantize.py index febf5c5..3c82b7f 100644 --- a/tests/python/relay/test_op_qnn_dequantize.py +++ b/tests/python/relay/test_op_qnn_dequantize.py @@ -30,7 +30,7 @@ def quantize_test_driver(in_dtype, quant_args, in_data, verify_output_data): input_zero_point=input_zero_point) mod = relay.Function(relay.analysis.free_vars(quantized_output), quantized_output) mod = tvm.IRModule.from_expr(mod) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, "llvm", params=None) rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) rt_mod.set_input(input_data=in_data) diff --git a/tests/python/relay/test_op_qnn_quantize.py 
b/tests/python/relay/test_op_qnn_quantize.py index 09b04d8..a284e8b 100644 --- a/tests/python/relay/test_op_qnn_quantize.py +++ b/tests/python/relay/test_op_qnn_quantize.py @@ -32,7 +32,7 @@ def quantize_test_driver(in_dtype, quant_args, axis, out_dtype, in_data, verify_ out_dtype=out_dtype) mod = relay.Function(relay.analysis.free_vars(quantized_output), quantized_output) mod = tvm.IRModule.from_expr(mod) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, "llvm", params=None) rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) rt_mod.set_input(input_data=in_data) diff --git a/tests/python/relay/test_op_qnn_requantize.py b/tests/python/relay/test_op_qnn_requantize.py index 8123397..fb52b30 100644 --- a/tests/python/relay/test_op_qnn_requantize.py +++ b/tests/python/relay/test_op_qnn_requantize.py @@ -24,7 +24,7 @@ from tvm.contrib import graph_runtime roundings = ["UPWARD", "TONEAREST"] def verify(mod, goldens): - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, "llvm", params=None) golden_data, golden_output = goldens rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) diff --git a/tests/python/relay/test_pass_annotate_target.py b/tests/python/relay/test_pass_annotate_target.py index 01ba9b6..0583946 100644 --- a/tests/python/relay/test_pass_annotate_target.py +++ b/tests/python/relay/test_pass_annotate_target.py @@ -52,7 +52,7 @@ def check_result(mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", return lib def check_vm_result(): - with relay.build_config(opt_level=3, disabled_pass=["AlterOpLayout"]): + with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]): exe = relay.vm.compile(mod, target=target, params=params) code, lib = exe.save() lib = update_lib(lib) @@ -63,7 +63,7 @@ def check_result(mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", tvm.testing.assert_allclose(out.asnumpy(), result, rtol=tol, atol=tol) def check_graph_runtime_result(): - with relay.build_config(opt_level=3, disabled_pass=["AlterOpLayout"]): + with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]): json, lib, param = relay.build(mod, target=target, params=params) lib = update_lib(lib) rt_mod = tvm.contrib.graph_runtime.create(json, lib, ctx) diff --git a/tests/python/relay/test_pass_fast_math.py b/tests/python/relay/test_pass_fast_math.py index e75316f..93ad034 100644 --- a/tests/python/relay/test_pass_fast_math.py +++ b/tests/python/relay/test_pass_fast_math.py @@ -29,7 +29,7 @@ def test_exp(): assert "fast_exp" in fast_mod.astext() # Check that FastMath option works for relay.build. - with relay.build_config(opt_level=3, required_pass=['FastMath']): + with tvm.transform.PassContext(opt_level=3, required_pass=['FastMath']): fast_mod = relay.optimize(mod, target='llvm', params=None) assert "fast_exp" in fast_mod[0].astext() @@ -43,7 +43,7 @@ def test_tanh(): assert "fast_tanh" in fast_mod.astext() # Check that FastMath option works for relay.build. 
- with relay.build_config(opt_level=3, required_pass=['FastMath']): + with tvm.transform.PassContext(opt_level=3, required_pass=['FastMath']): fast_mod = relay.optimize(mod, target='llvm', params=None) assert "fast_tanh" in fast_mod[0].astext() diff --git a/tests/python/relay/test_pass_fold_constant.py b/tests/python/relay/test_pass_fold_constant.py index 1e8c6da..fcccab5 100644 --- a/tests/python/relay/test_pass_fold_constant.py +++ b/tests/python/relay/test_pass_fold_constant.py @@ -213,7 +213,7 @@ def test_fold_batch_norm(): mod, params = create_workload(bn_output[0], initializer) mod["main"] = bind_params_by_name(mod["main"], params) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): mod = remove_bn_pass(mod) expect = run_infer_type(expected()) diff --git a/tests/python/relay/test_pass_manager.py b/tests/python/relay/test_pass_manager.py index d6037b5..25299ca 100644 --- a/tests/python/relay/test_pass_manager.py +++ b/tests/python/relay/test_pass_manager.py @@ -382,7 +382,7 @@ def test_sequential_pass(): def test_only_module_pass(): passes = [module_pass] sequential = tvm.transform.Sequential(opt_level=1, passes=passes) - with relay.build_config(required_pass=["mod_transform"]): + with tvm.transform.PassContext(required_pass=["mod_transform"]): ret_mod = sequential(mod) # Check the subtract function. sub_var, new_sub = extract_var_func(ret_mod, v_sub.name_hint) @@ -397,7 +397,7 @@ def test_sequential_pass(): # Check the subtract function. passes = [function_pass] sequential = tvm.transform.Sequential(opt_level=1, passes=passes) - with relay.build_config(required_pass=["func_transform"]): + with tvm.transform.PassContext(required_pass=["func_transform"]): ret_mod = sequential(mod) _, new_sub = extract_var_func(ret_mod, v_sub.name_hint) check_func(new_sub, get_ref_sub()) @@ -413,7 +413,7 @@ def test_sequential_pass(): passes = [module_pass, function_pass] sequential = tvm.transform.Sequential(opt_level=1, passes=passes) required = ["mod_transform", "func_transform"] - with relay.build_config(required_pass=required): + with tvm.transform.PassContext(required_pass=required): ret_mod = sequential(mod) # Check the abs function is added. 
@@ -490,7 +490,7 @@ def test_sequential_with_scoping(): ]) mod = tvm.IRModule({"main": before()}) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): with tvm.target.create("llvm"): mod = seq(mod) @@ -515,7 +515,7 @@ def test_print_ir(capfd): ]) mod = tvm.IRModule({"main": func}) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): mod = seq(mod) out = capfd.readouterr().err @@ -549,7 +549,7 @@ def test_print_debug_callback(): assert __TRACE_COUNTER__ == 0 mod = tvm.IRModule({"main": func}) - with relay.build_config(opt_level=3, trace=_tracer): + with tvm.transform.PassContext(opt_level=3, trace=_tracer): mod = seq(mod) assert __TRACE_COUNTER__ == 3 diff --git a/tests/python/relay/test_pass_partition_graph.py b/tests/python/relay/test_pass_partition_graph.py index 354b616..23bf618 100644 --- a/tests/python/relay/test_pass_partition_graph.py +++ b/tests/python/relay/test_pass_partition_graph.py @@ -195,7 +195,7 @@ def check_result(mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", def check_vm_result(): compile_engine.get().clear() - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): exe = relay.vm.compile(mod, target=target, params=params) code, lib = exe.save() lib = update_lib(lib) @@ -210,7 +210,7 @@ def check_result(mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", def check_graph_runtime_result(): compile_engine.get().clear() - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): json, lib, param = relay.build(mod, target=target, params=params) lib = update_lib(lib) rt_mod = tvm.contrib.graph_runtime.create(json, lib, ctx) @@ -512,7 +512,7 @@ def test_function_lifting(): transform.AlterOpLayout(), ]) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): mod = opt_pass(mod) return mod @@ -595,7 +595,7 @@ def test_function_lifting_inline(): transform.Inline(), ]) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): mod = opt_pass(mod) return mod @@ -885,7 +885,8 @@ def test_dnnl_fuse(): transform.PartitionGraph() ]) - with relay.build_config(opt_level=3, disabled_pass=["AlterOpLayout"]): + with tvm.transform.PassContext(opt_level=3, + disabled_pass=["AlterOpLayout"]): return composite_partition(mod) def test_detect_pattern(pattern_table, include_bn, include_sigmoid, diff --git a/tests/python/relay/test_simplify_fc_transpose.py b/tests/python/relay/test_simplify_fc_transpose.py index 537a5a2..e29038c 100644 --- a/tests/python/relay/test_simplify_fc_transpose.py +++ b/tests/python/relay/test_simplify_fc_transpose.py @@ -27,7 +27,7 @@ from tvm import relay from tvm.relay.data_dep_optimization import simplify_fc_transpose def run_func(func, params, x): - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, new_params = relay.build(func, "llvm", params=params) from tvm.contrib import graph_runtime diff --git a/tests/python/relay/test_sparse_dense_convert.py b/tests/python/relay/test_sparse_dense_convert.py index c4f0572..e0204ae 100644 --- a/tests/python/relay/test_sparse_dense_convert.py +++ b/tests/python/relay/test_sparse_dense_convert.py @@ -46,7 +46,7 @@ def random_bsr_matrix(M, N, BS_R, BS_C, density, dtype="float32"): return s def run_func(func, params, x): - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, new_params = relay.build(func, "llvm", params=params) from 
tvm.contrib import graph_runtime diff --git a/tests/python/unittest/test_runtime_module_export.py b/tests/python/unittest/test_runtime_module_export.py index fce7d2f..8473a67 100644 --- a/tests/python/unittest/test_runtime_module_export.py +++ b/tests/python/unittest/test_runtime_module_export.py @@ -67,7 +67,7 @@ def test_mod_export(): resnet18_mod, resnet18_params = relay.testing.resnet.get_workload(num_layers=18) resnet50_mod, resnet50_params = relay.testing.resnet.get_workload(num_layers=50) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): _, resnet18_gpu_lib, _ = relay.build_module.build(resnet18_mod, "cuda", params=resnet18_params) _, resnet50_cpu_lib, _ = relay.build_module.build(resnet50_mod, "llvm", params=resnet50_params) @@ -93,7 +93,7 @@ def test_mod_export(): return resnet18_mod, resnet18_params = relay.testing.resnet.get_workload(num_layers=18) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): _, resnet18_cpu_lib, _ = relay.build_module.build(resnet18_mod, "llvm", params=resnet18_params) A = te.placeholder((1024,), name='A') @@ -177,7 +177,7 @@ def test_mod_export(): return resnet18_mod, resnet18_params = relay.testing.resnet.get_workload(num_layers=18) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): _, resnet18_cpu_lib, _ = relay.build_module.build(resnet18_mod, "llvm", params=resnet18_params) A = te.placeholder((1024,), name='A') diff --git a/tests/python/unittest/test_target_codegen_blob.py b/tests/python/unittest/test_target_codegen_blob.py index 719ddfe..7cd5793 100644 --- a/tests/python/unittest/test_target_codegen_blob.py +++ b/tests/python/unittest/test_target_codegen_blob.py @@ -31,7 +31,7 @@ def test_resnet18(): def verify(data): mod, params = relay.testing.resnet.get_workload(num_layers=18) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, graph_params = relay.build_module.build(mod, "llvm", params=params) ctx = tvm.cpu() module = graph_runtime.create(graph, lib, ctx) @@ -42,7 +42,7 @@ def test_resnet18(): return out resnet18_mod, resnet18_params = relay.testing.resnet.get_workload(num_layers=18) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, resnet18_gpu_lib, graph_params = relay.build_module.build(resnet18_mod, "cuda", params=resnet18_params) from tvm.contrib import util diff --git a/tutorials/autotvm/tune_relay_arm.py b/tutorials/autotvm/tune_relay_arm.py index ffd3e8b..3b07097 100644 --- a/tutorials/autotvm/tune_relay_arm.py +++ b/tutorials/autotvm/tune_relay_arm.py @@ -311,7 +311,7 @@ def tune_and_evaluate(tuning_opt): # compile kernels with history best records with autotvm.apply_history_best(log_file): print("Compile...") - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build_module.build( mod, target=target, params=params) diff --git a/tutorials/autotvm/tune_relay_cuda.py b/tutorials/autotvm/tune_relay_cuda.py index 4195075..a6fe45b 100644 --- a/tutorials/autotvm/tune_relay_cuda.py +++ b/tutorials/autotvm/tune_relay_cuda.py @@ -222,7 +222,7 @@ def tune_and_evaluate(tuning_opt): # compile kernels with history best records with autotvm.apply_history_best(log_file): print("Compile...") - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build_module.build( mod, target=target, params=params) diff --git 
a/tutorials/autotvm/tune_relay_mobile_gpu.py b/tutorials/autotvm/tune_relay_mobile_gpu.py index ad74608..4748f41 100644 --- a/tutorials/autotvm/tune_relay_mobile_gpu.py +++ b/tutorials/autotvm/tune_relay_mobile_gpu.py @@ -308,7 +308,7 @@ def tune_and_evaluate(tuning_opt): # compile kernels with history best records with autotvm.apply_history_best(log_file): print("Compile...") - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build_module.build( mod, target=target, params=params, target_host=target_host) # export library diff --git a/tutorials/autotvm/tune_relay_x86.py b/tutorials/autotvm/tune_relay_x86.py index 15ce2de..dcc5b25 100644 --- a/tutorials/autotvm/tune_relay_x86.py +++ b/tutorials/autotvm/tune_relay_x86.py @@ -189,7 +189,7 @@ def tune_and_evaluate(tuning_opt): # compile kernels with graph-level best records with autotvm.apply_graph_best(graph_opt_sch_file): print("Compile...") - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build_module.build( mod, target=target, params=params) diff --git a/tutorials/dev/relay_pass_infra.py b/tutorials/dev/relay_pass_infra.py index 980d96c..df40733 100644 --- a/tutorials/dev/relay_pass_infra.py +++ b/tutorials/dev/relay_pass_infra.py @@ -160,7 +160,7 @@ print(mod1) # however, provides a configuration interface # for users to customize the optimization level that they want to execute. -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): mod2 = seq(mod) print(mod2) @@ -173,7 +173,7 @@ print(mod2) # EliminateCommonSubexpr as following. The printed module will again show two # identical addition operations. -with relay.build_config(opt_level=3, disabled_pass=["EliminateCommonSubexpr"]): +with tvm.transform.PassContext(opt_level=3, disabled_pass=["EliminateCommonSubexpr"]): mod3 = seq(mod) print(mod3) @@ -182,12 +182,12 @@ print(mod3) # provides a means to make pass target-aware. For example, the layout # alteration pass falls in such category. 
-with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): mod4 = seq(mod) print(mod4) seq1 = tvm.transform.Sequential([relay.transform.AlterOpLayout()]) -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): with tvm.target.create("llvm"): mod5 = seq1(mod) print(mod5) @@ -242,7 +242,7 @@ seq = tvm.transform.Sequential([relay.transform.FoldConstant(), relay.transform.EliminateCommonSubexpr(), relay.transform.FuseOps(), tvm.transform.PrintIR()]) -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): mod = seq(mod) print("done") diff --git a/tutorials/frontend/build_gcn.py b/tutorials/frontend/build_gcn.py index 6ac518e..19719a5 100644 --- a/tutorials/frontend/build_gcn.py +++ b/tutorials/frontend/build_gcn.py @@ -336,7 +336,7 @@ func = relay.build_module.bind_params_by_name(func, params) mod = tvm.IRModule() mod["main"] = func # Build with Relay -with relay.build_config(opt_level=0): # Currently only support opt_level=0 +with tvm.transform.PassContext(opt_level=0): # Currently only support opt_level=0 graph, lib, params = relay.build(mod, target, params=params) # Generate graph runtime diff --git a/tutorials/frontend/deploy_model_on_android.py b/tutorials/frontend/deploy_model_on_android.py index 17ec9cb..bc5b523 100644 --- a/tutorials/frontend/deploy_model_on_android.py +++ b/tutorials/frontend/deploy_model_on_android.py @@ -263,7 +263,7 @@ input_name = 'input_1' shape_dict = {input_name: x.shape} mod, params = relay.frontend.from_keras(keras_mobilenet_v2, shape_dict) -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target=target, target_host=target_host, params=params) diff --git a/tutorials/frontend/deploy_model_on_rasp.py b/tutorials/frontend/deploy_model_on_rasp.py index ef707fe..25df341 100644 --- a/tutorials/frontend/deploy_model_on_rasp.py +++ b/tutorials/frontend/deploy_model_on_rasp.py @@ -179,7 +179,7 @@ else: # The above line is a simple form of # target = tvm.target.create('llvm -device=arm_cpu -model=bcm2837 -target=armv7l-linux-gnueabihf -mattr=+neon') -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(func, target, params=params) # After `relay.build`, you will get three return values: graph, diff --git a/tutorials/frontend/deploy_prequantized.py b/tutorials/frontend/deploy_prequantized.py index 4027977..d6183d6 100644 --- a/tutorials/frontend/deploy_prequantized.py +++ b/tutorials/frontend/deploy_prequantized.py @@ -81,7 +81,7 @@ def get_synset(): def run_tvm_model(mod, params, input_name, inp, target="llvm"): - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): json, lib, params = relay.build(mod, target=target, params=params) runtime = tvm.contrib.graph_runtime.create(json, lib, tvm.context(target, 0)) diff --git a/tutorials/frontend/deploy_prequantized_tflite.py b/tutorials/frontend/deploy_prequantized_tflite.py index 5fd6837..ecd283a 100644 --- a/tutorials/frontend/deploy_prequantized_tflite.py +++ b/tutorials/frontend/deploy_prequantized_tflite.py @@ -198,7 +198,7 @@ mod, params = relay.frontend.from_tflite(tflite_model, # Let's now compile the Relay module. We use the "llvm" target here. Please replace it with the # target platform that you are interested in.
target = 'llvm' -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build_module.build(mod, target=target, params=params) diff --git a/tutorials/frontend/deploy_ssd_gluoncv.py b/tutorials/frontend/deploy_ssd_gluoncv.py index 6126df0..e2fc3c5 100644 --- a/tutorials/frontend/deploy_ssd_gluoncv.py +++ b/tutorials/frontend/deploy_ssd_gluoncv.py @@ -87,7 +87,7 @@ block = model_zoo.get_model(model_name, pretrained=True) def build(target): mod, params = relay.frontend.from_mxnet(block, {"data": dshape}) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target, params=params) return graph, lib, params diff --git a/tutorials/frontend/from_caffe2.py b/tutorials/frontend/from_caffe2.py index 8fad80d..5988525 100644 --- a/tutorials/frontend/from_caffe2.py +++ b/tutorials/frontend/from_caffe2.py @@ -82,13 +82,13 @@ shape_dict = {input_name: data.shape} dtype_dict = {input_name: data.dtype} # parse Caffe2 model and convert into Relay computation graph -from tvm import relay +from tvm import relay, transform mod, params = relay.frontend.from_caffe2(resnet50.init_net, resnet50.predict_net, shape_dict, dtype_dict) # compile the model # target x86 CPU target = 'llvm' -with relay.build_config(opt_level=3): +with transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target, params=params) ###################################################################### diff --git a/tutorials/frontend/from_coreml.py b/tutorials/frontend/from_coreml.py index 2a0c8db..beac4832 100644 --- a/tutorials/frontend/from_coreml.py +++ b/tutorials/frontend/from_coreml.py @@ -74,7 +74,7 @@ shape_dict = {'image': x.shape} # Parse CoreML model and convert into Relay computation graph mod, params = relay.frontend.from_coreml(mlmodel, shape_dict) -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target, params=params) diff --git a/tutorials/frontend/from_darknet.py b/tutorials/frontend/from_darknet.py index e2c1ea5..6d84463 100644 --- a/tutorials/frontend/from_darknet.py +++ b/tutorials/frontend/from_darknet.py @@ -100,7 +100,7 @@ ctx = tvm.cpu(0) data = np.empty([batch_size, net.c, net.h, net.w], dtype) shape = {'data': data.shape} print("Compiling the model...") -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target=target, target_host=target_host, diff --git a/tutorials/frontend/from_keras.py b/tutorials/frontend/from_keras.py index 928a8ac..7ece790 100644 --- a/tutorials/frontend/from_keras.py +++ b/tutorials/frontend/from_keras.py @@ -79,7 +79,7 @@ mod, params = relay.frontend.from_keras(keras_resnet50, shape_dict) # compile the model target = 'cuda' ctx = tvm.gpu(0) -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): executor = relay.build_module.create_executor('graph', mod, ctx, target) ###################################################################### diff --git a/tutorials/frontend/from_mxnet.py b/tutorials/frontend/from_mxnet.py index d0e4c4a..6e6b2d7 100644 --- a/tutorials/frontend/from_mxnet.py +++ b/tutorials/frontend/from_mxnet.py @@ -90,7 +90,7 @@ func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_ ###################################################################### # now compile the graph target = 'cuda' -with relay.build_config(opt_level=3): +with 
tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(func, target, params=params) ###################################################################### diff --git a/tutorials/frontend/from_onnx.py b/tutorials/frontend/from_onnx.py index 766451c..9973a08 100644 --- a/tutorials/frontend/from_onnx.py +++ b/tutorials/frontend/from_onnx.py @@ -74,7 +74,7 @@ input_name = '1' shape_dict = {input_name: x.shape} mod, params = relay.frontend.from_onnx(onnx_model, shape_dict) -with relay.build_config(opt_level=1): +with tvm.transform.PassContext(opt_level=1): intrp = relay.build_module.create_executor('graph', mod, tvm.cpu(0), target) ###################################################################### diff --git a/tutorials/frontend/from_pytorch.py b/tutorials/frontend/from_pytorch.py index 8354b0e..53d29a9 100644 --- a/tutorials/frontend/from_pytorch.py +++ b/tutorials/frontend/from_pytorch.py @@ -101,7 +101,7 @@ mod, params = relay.frontend.from_pytorch(scripted_model, target = 'llvm' target_host = 'llvm' ctx = tvm.cpu(0) -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target=target, target_host=target_host, diff --git a/tutorials/frontend/from_tensorflow.py b/tutorials/frontend/from_tensorflow.py index 0ebd733..b7b3d69 100644 --- a/tutorials/frontend/from_tensorflow.py +++ b/tutorials/frontend/from_tensorflow.py @@ -144,7 +144,7 @@ print("Tensorflow protobuf imported to relay frontend.") # params: final params after compilation. # lib: target library which can be deployed on target with TVM runtime. -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target=target, target_host=target_host, diff --git a/tutorials/frontend/from_tflite.py b/tutorials/frontend/from_tflite.py index e01a4ec..35a308c 100644 --- a/tutorials/frontend/from_tflite.py +++ b/tutorials/frontend/from_tflite.py @@ -128,14 +128,14 @@ input_shape = (1, 224, 224, 3) input_dtype = "float32" # Parse TFLite model and convert it to a Relay module -from tvm import relay +from tvm import relay, transform mod, params = relay.frontend.from_tflite(tflite_model, shape_dict={input_tensor: input_shape}, dtype_dict={input_tensor: input_dtype}) # Build the module against x86 CPU target = "llvm" -with relay.build_config(opt_level=3): +with transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target, params=params) ###################################################################### diff --git a/tutorials/relay_quick_start.py b/tutorials/relay_quick_start.py index b2174a0..e52a99a 100644 --- a/tutorials/relay_quick_start.py +++ b/tutorials/relay_quick_start.py @@ -96,7 +96,7 @@ print(mod.astext(show_meta_data=False)) opt_level = 3 target = tvm.target.cuda() -with relay.build_config(opt_level=opt_level): +with tvm.transform.PassContext(opt_level=opt_level): graph, lib, params = relay.build(mod, target, params=params) ##################################################################### diff --git a/vta/scripts/tune_resnet.py b/vta/scripts/tune_resnet.py index 26c240e..2d358d3 100644 --- a/vta/scripts/tune_resnet.py +++ b/vta/scripts/tune_resnet.py @@ -127,7 +127,7 @@ def compile_network(opt, env, target): # Perform quantization in Relay # Note: We set opt_level to 3 in order to fold batch norm - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): with relay.quantize.qconfig(global_scale=8.0, skip_conv_layers=[0]): relay_prog
= relay.quantize.quantize(mod["main"], params=params) @@ -272,7 +272,7 @@ if __name__ == '__main__': # Compile network print("Compiling network with best tuning parameters...") if target.device_name != "vta": - with relay.build_config(opt_level=3, disabled_pass={"AlterOpLayout"}): + with tvm.transform.PassContext(opt_level=3, disabled_pass={"AlterOpLayout"}): graph, lib, params = relay.build( relay_prog, target=target, params=params, target_host=env.target_host) diff --git a/vta/tutorials/autotvm/tune_relay_vta.py b/vta/tutorials/autotvm/tune_relay_vta.py index 63106a5..a92b1ee 100644 --- a/vta/tutorials/autotvm/tune_relay_vta.py +++ b/vta/tutorials/autotvm/tune_relay_vta.py @@ -92,7 +92,7 @@ def compile_network(env, target, model, start_pack, stop_pack): # Perform quantization in Relay # Note: We set opt_level to 3 in order to fold batch norm - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): with relay.quantize.qconfig(global_scale=8.0, skip_conv_layers=[0]): mod = relay.quantize.quantize(mod, params=params) diff --git a/vta/tutorials/frontend/deploy_classification.py b/vta/tutorials/frontend/deploy_classification.py index 7ca4b98..3a36785 100644 --- a/vta/tutorials/frontend/deploy_classification.py +++ b/vta/tutorials/frontend/deploy_classification.py @@ -171,7 +171,7 @@ with autotvm.tophub.context(target): if target.device_name == "vta": # Perform quantization in Relay # Note: We set opt_level to 3 in order to fold batch norm - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): with relay.quantize.qconfig(global_scale=8.0, skip_conv_layers=[0]): mod = relay.quantize.quantize(mod, params=params) diff --git a/vta/tutorials/frontend/deploy_detection.py b/vta/tutorials/frontend/deploy_detection.py index efcd2c4..5039488 100644 --- a/vta/tutorials/frontend/deploy_detection.py +++ b/vta/tutorials/frontend/deploy_detection.py @@ -207,7 +207,7 @@ with autotvm.tophub.context(target): if target.device_name == "vta": # Perform quantization in Relay # Note: We set opt_level to 3 in order to fold batch norm - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): with relay.quantize.qconfig(global_scale=33.0, skip_conv_layers=[0], store_lowbit_output=True, -- 2.7.4
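Migration sketch: the following minimal before-and-after example summarizes the mechanical change this patch applies across the tree. It assumes TVM at roughly this commit, where relay.build_config survives only as a shim that forwards to tvm.transform.PassContext and emits a DeprecationWarning; the MLP workload and the "llvm" target are illustrative stand-ins rather than anything this patch prescribes.

    import tvm
    from tvm import relay
    from tvm.relay import testing

    # Any Relay module works here; a small MLP keeps the sketch self-contained.
    mod, params = testing.mlp.get_workload(batch_size=1)

    # Old spelling: still accepted after this patch, but the shim now emits
    # a DeprecationWarning before delegating to tvm.transform.PassContext.
    with relay.build_config(opt_level=3):
        graph, lib, graph_params = relay.build(mod, target="llvm", params=params)

    # New spelling: construct the PassContext directly. The keyword arguments
    # (opt_level, required_pass, disabled_pass, trace) carry over unchanged,
    # so each call site migrates as a one-line rewrite.
    with tvm.transform.PassContext(opt_level=3):
        graph, lib, graph_params = relay.build(mod, target="llvm", params=params)

On the C++ side the change is behavior-preserving: PassContext was already the underlying object, and the fallback device simply moves into the context's config map under the "relay.fallback_device_type" key, stored as an Integer rather than an IntImm.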