namespace tvm {
namespace relay {
-/*!
- * \brief Infer the type of a function as if it is mapped to var in the mod.
- *
- * \param f the function.
- * \param mod The module used for referencing global functions.
- * \param var The global variable corresponding to the function.
- *
- * \return A type checked Function with its checked_type field populated.
- * \note this function mutates mod and is not thread-safe.
- */
-TVM_DLL Function InferType(const Function& f,
- const Module& mod,
- const GlobalVar& var);
-
/*!
* \brief Check that types are well kinded by applying "kinding rules".
*
/*! \brief A map from global type vars to ADT type data. */
tvm::Map<GlobalTypeVar, TypeData> type_definitions;
- /*! \brief The entry function (i.e. "main"). */
- GlobalVar entry_func;
-
ModuleNode() {}
void VisitAttrs(tvm::AttrVisitor* v) final {
v->Visit("functions", &functions);
v->Visit("type_definitions", &type_definitions);
v->Visit("global_var_map_", &global_var_map_);
- v->Visit("entry_func", &entry_func);
v->Visit("global_type_var_map_", &global_type_var_map_);
}
*/
TVM_DLL void Remove(const GlobalVar& var);
+ /*!
+ * \brief Check if the global_var_map_ contains a global variable.
+ * \param name The variable name.
+ * \returns true if contains, otherwise false.
+ */
+ TVM_DLL bool ContainGlobalVar(const std::string& name) const;
+
/*!
* \brief Lookup a global function by its variable.
* \param str The unique string specifying the global variable.
* Allows one to optionally pass a global function map as
* well.
*
- * \param expr The expression to set as the entry point to the module.
+ * \param expr The expression to set as the main function of the module.
* \param global_funcs The global function map.
*
- * \returns A module with expr set as the entry point.
+ * \returns A module with expr set as the main function.
*/
TVM_DLL static Module FromExpr(
const Expr& expr,
# Generate workload and schedule dictionaries.
if isinstance(graph, relay.Module):
- graph = graph[graph.entry_func]
+ graph = graph["main"]
if isinstance(graph, relay.expr.Function):
node_dict = {}
"""A method to infer the type of a relay expression."""
mod = relay.Module.from_expr(node)
mod = transform.InferType()(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(node, relay.Function) else entry.body
mod = relay.Module.from_expr(updated_expr)
mod = transform.InferType()(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(updated_expr, relay.Function) else entry.body
assert self.mod is not None
def _interp_wrapper(*args, **kwargs):
if expr is None:
- args = self._convert_args(self.mod[self.mod.entry_func], args, kwargs)
+ args = self._convert_args(self.mod["main"], args, kwargs)
else:
args = self._convert_args(expr, args, kwargs)
if expr is None:
pass
elif isinstance(expr, GlobalVar):
- self.mod[self.mod.entry_func] = self.mod[expr]
+ self.mod["main"] = self.mod[expr]
else:
assert isinstance(expr, Function)
func = Function([], Call(expr, relay_args))
relay_args = []
if self.mod:
- self.mod[self.mod.entry_func] = func
+ self.mod["main"] = func
else:
self.mod = module.Module.from_expr(func)
mod = self.optimize()
- opt_expr = Call(mod[self.mod.entry_func.name_hint], relay_args)
+ opt_expr = Call(mod["main"], relay_args)
return self._intrp(opt_expr)
return _interp_wrapper
ret : tvm.relay.Module
The optimized module.
"""
- main_func = mod[mod.entry_func]
+ main_func = mod["main"]
opt_passes = []
if not main_func.params and isinstance(main_func.body, GlobalVar):
expr = expr if expr else self.mod
assert expr, "either expr or self.mod should be not null."
if isinstance(expr, Expr):
- self.mod[self.mod.entry_func] = expr
- main = self.mod[self.mod.entry_func]
+ self.mod["main"] = expr
+ main = self.mod["main"]
def _vm_wrapper(*args, **kwargs):
args = self._convert_args(main, args, kwargs)
The parameters of the final graph.
"""
if isinstance(mod, _Module):
- func = mod[mod.entry_func]
+ func = mod["main"]
elif isinstance(mod, _expr.Function):
func = mod
warnings.warn(
def _make_executor(self, expr=None):
if expr:
- self.mod[self.mod.entry_func] = expr
- ret_type = self.mod[self.mod.entry_func].checked_type.ret_type
+ self.mod["main"] = expr
+ ret_type = self.mod["main"].checked_type.ret_type
num_outputs = len(ret_type.fields) if isinstance(ret_type, _ty.TupleType) else 1
graph_json, mod, params = build(self.mod, target=self.target)
gmodule = _graph_rt.create(graph_json, mod, self.ctx)
gmodule.set_input(**params)
def _graph_wrapper(*args, **kwargs):
- args = self._convert_args(self.mod[self.mod.entry_func], args, kwargs)
+ args = self._convert_args(self.mod["main"], args, kwargs)
# Create map of inputs.
for i, arg in enumerate(args):
gmodule.set_input(i, arg)
outputs = out[0]
func = _expr.Function(analysis.free_vars(outputs), outputs)
- self._mod[self._mod.entry_func] = func
+ self._mod["main"] = func
return self._mod, self._params
"""A method to infer the type of an intermediate node in the relay graph."""
mod = _module.Module.from_expr(node)
mod = _transform.InferType()(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(node, _expr.Function) else entry.body
def infer_shape(inputs):
"""A method to infer the type of an intermediate node in the relay graph."""
mod = _module.Module.from_expr(node)
mod = transform.InferType()(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(node, _expr.Function) else entry.body
def _mx_fully_connected(inputs, attrs):
else:
msg = "mxnet.Symbol or gluon.HybridBlock expected, got {}".format(type(symbol))
raise ValueError(msg)
- mod[mod.entry_func] = func
+ mod["main"] = func
return mod, params
"""A method to infer the type of an intermediate node in the relay graph."""
mod = _module.Module.from_expr(node)
mod = _transform.InferType()(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(node, _expr.Function) else entry.body
def _infer_shape(node, params=None):
out = out[0] if len(out) == 1 else _expr.Tuple(out)
func = _expr.Function(analysis.free_vars(out), out)
- self._mod[self._mod.entry_func] = func
+ self._mod["main"] = func
return self._mod, self._params
def _parse_import_prerequisites(self, graph):
def _add(self, var, val, update=False):
if isinstance(val, _expr.Expr):
if isinstance(var, _base.string_types):
- var = _expr.GlobalVar(var)
- _make.Module_Add(self, var, val, update)
+ if _module.Module_ContainGlobalVar(self, var):
+ var = _module.Module_GetGlobalVar(self, var)
+ else:
+ var = _expr.GlobalVar(var)
+ _module.Module_Add(self, var, val, update)
else:
assert isinstance(val, _ty.Type)
if isinstance(var, _base.string_types):
mod = optimize(mod)
mod = quantize_seq(mod)
- return mod[mod.entry_func.name_hint]
+ return mod["main"]
assert isinstance(opt_pass, transform.Pass)
mod = relay.Module.from_expr(expr)
mod = opt_pass(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
Returns
-------
- net : nnvm.symbol
- The computational graph
+ mod : tvm.relay.Module
+ The relay module that contains a DCGAN network.
params : dict of str to NDArray
The parameters.
"""
Returns
-------
- net: relay.Function
- The computation graph representing densenet.
+ mod: tvm.relay.Module
+ The relay module that contains a DenseNet network.
params : dict of str to NDArray
The benchmark parameters.
The data type
Returns
-------
- net : nnvm.symbol
- The computational graph
+ mod : tvm.relay.Module
+ The relay module that contains a DQN network.
params : dict of str to NDArray
The parameters.
"""
Returns
-------
- net : nnvm.Symbol
- The computational graph
+ mod : tvm.relay.Module
+ The relay module that contains an Inception V3 network.
params : dict of str to NDArray
The parameters.
Returns
-------
- net : tvm.relay.Function
- The updated dataflow
+ mod : tvm.relay.Module
+ The created relay module.
params : dict of str to NDArray
The parameters.
"""
mod = relay.Module.from_expr(net)
mod = relay.transform.InferType()(mod)
- net = mod[mod.entry_func]
shape_dict = {
- v.name_hint : v.checked_type for v in net.params}
+ v.name_hint : v.checked_type for v in mod["main"].params}
np.random.seed(seed)
initializer = initializer if initializer else Xavier()
params = {}
init_value = np.zeros(v.concrete_shape).astype(v.dtype)
initializer(k, init_value)
params[k] = tvm.nd.array(init_value, ctx=tvm.cpu(0))
- return net, params
+ return mod, params
The data type
Returns
-------
- net : nnvm.symbol
- The computational graph
+ mod : tvm.relay.Module
+ The relay module that contains a LSTM network.
params : dict of str to NDArray
The parameters.
"""
Returns
-------
- net : relay.Function
- The dataflow.
+ mod : tvm.relay.Module
+ The relay module that contains a mlp network.
params : dict of str to NDArray
The parameters.
Returns
-------
- net : relay.Function
- The computational graph
+ mod : tvm.relay.Module
+ The relay module that contains a MobileNet network.
params : dict of str to NDArray
The parameters.
Returns
-------
- net : relay.Function
- The computational graph
+ mod : tvm.relay.Module
+ The relay module that contains a ResNet network.
params : dict of str to NDArray
The parameters.
Returns
-------
- net : nnvm.Symbol
- The computational graph
+ mod : tvm.relay.Module
+ The relay module that contains a SqueezeNet network.
params : dict of str to NDArray
The parameters.
Returns
-------
- net : nnvm.Symbol
- The computational graph
+ mod : tvm.relay.Module
+ The relay module that contains a VGG network.
params : dict of str to NDArray
The parameters.
relay_module = Optimize(relay_module, targets_, params);
CHECK(relay_module.defined());
// Get the updated function.
- func = relay_module->Lookup(relay_module->entry_func->name_hint);
+ func = relay_module->Lookup("main");
// Generate code for the updated function.
graph_codegen_ = std::unique_ptr<GraphCodegen>(new GraphCodegen());
// TODO(zhiics): This measurement is for temporary usage. Remove it later. We
// need to introduce a better profiling method.
#if ENABLE_PROFILING
- DLOG(INFO) << "Entry function is " << module->entry_func << std::endl;
+ DLOG(INFO) << "Entry function is main." << std::endl;
auto start = std::chrono::high_resolution_clock::now();
#endif // ENABLE_PROFILING
- Object res = vm.Invoke(module->entry_func->name_hint, vm_args);
+ Object res = vm.Invoke("main", vm_args);
#if ENABLE_PROFILING
auto end = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
n->global_var_map_.Set(kv.first->name_hint, kv.first);
}
- n->entry_func = GlobalVarNode::make("main");
-
for (const auto& kv : n->type_definitions) {
// set global typevar map
CHECK(!n->global_type_var_map_.count(kv.first->var->name_hint))
return Module(n);
}
+bool ModuleNode::ContainGlobalVar(const std::string& name) const {
+ return global_var_map_.find(name) != global_var_map_.end();
+}
+
GlobalVar ModuleNode::GetGlobalVar(const std::string& name) const {
auto it = global_var_map_.find(name);
CHECK(it != global_var_map_.end())
} else {
func = FunctionNode::make({}, expr, Type(), {}, {});
}
- mod->Add(mod->entry_func, func);
+ auto main_gv = GlobalVarNode::make("main");
+ mod->Add(main_gv, func);
return mod;
}
TVM_REGISTER_API("relay._make.Module")
.set_body_typed(ModuleNode::make);
-TVM_REGISTER_API("relay._make.Module_Add")
+TVM_REGISTER_API("relay._module.Module_Add")
.set_body([](TVMArgs args, TVMRetValue* ret) {
Module mod = args[0];
GlobalVar var = args[1];
TVM_REGISTER_API("relay._module.Module_GetGlobalVar")
.set_body_method<Module>(&ModuleNode::GetGlobalVar);
+TVM_REGISTER_API("relay._module.Module_ContainGlobalVar")
+.set_body_method<Module>(&ModuleNode::ContainGlobalVar);
+
TVM_REGISTER_API("relay._module.Module_GetGlobalTypeVar")
.set_body_method<Module>(&ModuleNode::GetGlobalTypeVar);
auto mod = ModuleNode::FromExpr(expr);
auto seq = transform::Sequential(passes);
mod = seq(mod);
- auto entry_func = mod->Lookup(mod->entry_func);
+ auto entry_func = mod->Lookup("main");
expr = expr.as<FunctionNode>() == nullptr ? entry_func->body : entry_func;
return ValueToExpr(executor_(expr));
}
auto mod = ModuleNode::FromExpr(expr);
auto seq = transform::Sequential(passes);
mod = seq(mod);
- auto entry_func = mod->Lookup(mod->entry_func);
+ auto entry_func = mod->Lookup("main");
auto fused_infered =
expr.as<FunctionNode>() == nullptr ? entry_func->body : entry_func;
return Reify(executor_(fused_infered), ll);
} // namespace partial_eval
Module PartialEval(const Module& m) {
- CHECK(m->entry_func.defined());
relay::partial_eval::PartialEvaluator pe(m);
std::vector<GlobalVar> gvs;
for (const auto& p : m->functions) {
Expr FoldConstantOpt(const Expr& expr) {
auto mod = ModuleNode::FromExpr(expr);
mod = transform::FoldConstant()(mod);
- auto entry_func = mod->Lookup(mod->entry_func);
+ auto entry_func = mod->Lookup("main");
return expr.as<FunctionNode>() == nullptr ? entry_func->body : entry_func;
}
// type check it anyway; afterwards we can just recover type
// from the type-checked function to avoid doing unnecessary work.
- Function func = mod->Lookup(mod->entry_func);
+ Function func = mod->Lookup("main");
// FromExpr wraps a naked expression as a function, we will unbox
// it here.
return func->body;
}
} else {
- auto e = TypeInferencer(mod_ref, mod_ref->entry_func).Infer(expr);
+ auto e = TypeInferencer(mod_ref, mod_ref->GetGlobalVar("main")).Infer(expr);
CHECK(WellFormed(e));
auto free_tvars = FreeTypeVars(e, mod_ref);
CHECK(free_tvars.size() == 0)
auto fx = relay::FunctionNode::make(tvm::Array<relay::Var>{ y }, call, relay::Type(), {});
auto mod = relay::ModuleNode::FromExpr(fx);
mod = relay::transform::InferType()(mod);
- auto type_fx = mod->Lookup(mod->entry_func);
+ auto type_fx = mod->Lookup("main");
auto expected = relay::FuncTypeNode::make(tvm::Array<relay::Type>{ tensor_type }, tensor_type, {}, {});
CHECK(AlphaEqual(type_fx->checked_type(), expected));
}
CHECK(mod.defined());
- auto entry_func = mod->entry_func;
+ auto entry_func = mod->GetGlobalVar("main");
CHECK(entry_func.defined());
- relay::Function f = mod->Lookup(entry_func->name_hint);
+ relay::Function f = mod->Lookup("main");
CHECK(f.defined());
// Expected function
// Infer type for the expected function.
auto mod1 = relay::ModuleNode::FromExpr(expected_func);
mod1 = relay::transform::InferType()(mod1);
- auto expected = mod1->Lookup(mod1->entry_func);
+ auto expected = mod1->Lookup("main");
CHECK(relay::AlphaEqual(f, expected));
}
from model_zoo import c2_squeezenet, relay_squeezenet
-def compare_graph(lhs_mod, func):
- rhs_mod = relay.Module.from_expr(func)
+def compare_graph(lhs_mod, rhs_mod):
+ lhs_mod = transform.InferType()(lhs_mod)
rhs_mod = transform.InferType()(rhs_mod)
- assert relay.analysis.alpha_equal(lhs_mod[lhs_mod.entry_func],
- rhs_mod[rhs_mod.entry_func])
+ assert relay.analysis.alpha_equal(lhs_mod["main"], rhs_mod["main"])
def test_squeeze_net():
dtype_dict = {'data': 'float32'}
mod, _, = relay.frontend.from_caffe2(
c2_squeezenet.init_net, c2_squeezenet.predict_net, shape_dict, dtype_dict)
- relay_func, _ = relay_squeezenet()
- compare_graph(mod, relay_func)
+ relay_mod, _ = relay_squeezenet()
+ compare_graph(mod, relay_mod)
if __name__ == '__main__':
shape_dict = {input_name : x.shape}
mod, params = relay.frontend.from_coreml(model, shape_dict)
for target, ctx in ctx_list():
- tvm_output = get_tvm_output(mod[mod.entry_func], x, params, target, ctx)
+ tvm_output = get_tvm_output(mod["main"], x, params, target, ctx)
print(target, ctx, model_name, 'prediction id: ', np.argmax(tvm_output.flat))
def test_mobilenet_checkonly():
from tvm.relay import transform
import model_zoo
-def compare_graph(f1, f2):
- assert relay.analysis.alpha_equal(f1, f2)
+def compare_graph(lhs_mod, rhs_mod):
+ lhs_mod = transform.InferType()(lhs_mod)
+ rhs_mod = transform.InferType()(rhs_mod)
+ assert relay.analysis.alpha_equal(lhs_mod["main"], rhs_mod["main"])
def test_mlp():
shape = {"data": (1, 1, 28, 28)}
mx_fun = model_zoo.mx_mlp()
mod, _ = relay.frontend.from_mxnet(mx_fun, shape=shape)
relay_fun = model_zoo.relay_mlp()
- compare_graph(mod[mod.entry_func], relay_fun)
+ compare_graph(mod, relay_fun)
def test_vgg():
for n in [11, 13, 16, 19]:
mx_sym = model_zoo.mx_vgg(n)
mod, _ = relay.frontend.from_mxnet(mx_sym, shape=shape)
- relay_sym = model_zoo.relay_vgg(n)
- compare_graph(mod[mod.entry_func], relay_sym)
+ relay_mod = model_zoo.relay_vgg(n)
+ compare_graph(mod, relay_mod)
def test_resnet():
for n in [18, 34, 50, 101]:
mx_sym = model_zoo.mx_resnet(n)
mod, _ = relay.frontend.from_mxnet(mx_sym, shape=shape)
- relay_sym = model_zoo.relay_resnet(n)
- compare_graph(mod[mod.entry_func], relay_sym)
+ relay_mod = model_zoo.relay_resnet(n)
+ compare_graph(mod, relay_mod)
def test_squeezenet():
for version in ['1.0', '1.1']:
mx_sym = model_zoo.mx_squeezenet(version)
mod, _ = relay.frontend.from_mxnet(mx_sym, shape)
- relay_sym = model_zoo.relay_squeezenet(version)
- compare_graph(mod[mod.entry_func], relay_sym)
+ relay_mod = model_zoo.relay_squeezenet(version)
+ compare_graph(mod, relay_mod)
def test_inception_v3():
shape = {"data": (1, 3, 299, 299)}
mx_sym = model_zoo.mx_inception_v3()
mod, _ = relay.frontend.from_mxnet(mx_sym, shape)
- relay_sym = model_zoo.relay_inception_v3()
- compare_graph(mod[mod.entry_func], relay_sym)
+ relay_mod = model_zoo.relay_inception_v3()
+ compare_graph(mod, relay_mod)
def test_dqn():
shape = {"data": (1, 4, 84, 84)}
mx_sym = model_zoo.mx_dqn()
mod, _ = relay.frontend.from_mxnet(mx_sym, shape)
- relay_sym = model_zoo.relay_dqn()
- compare_graph(mod[mod.entry_func], relay_sym)
+ relay_mod = model_zoo.relay_dqn()
+ compare_graph(mod, relay_mod)
def test_dcgan():
shape = {"data": (2, 100)}
mx_sym = model_zoo.mx_dcgan()
mod, _ = relay.frontend.from_mxnet(mx_sym, shape)
- relay_sym = model_zoo.relay_dcgan(batch_size=2)
- compare_graph(mod[mod.entry_func], relay_sym)
+ relay_mod = model_zoo.relay_dcgan(batch_size=2)
+ compare_graph(mod, relay_mod)
def test_multi_outputs():
z = F.split(x, **kwargs)
z = F.subtract(F.add(z[0], z[2]), y)
func = relay.Function(relay.analysis.free_vars(z), z)
- mod = relay.Module.from_expr(func)
- mod = transform.InferType()(mod)
- return mod[mod.entry_func]
+ return relay.Module.from_expr(func)
mx_sym = mx_compose(mx, num_outputs=3, axis=1)
mod, _ = relay.frontend.from_mxnet(
mx_sym, shape={"x":xshape, "y":yshape})
- relay_sym = relay_compose(relay, indices_or_sections=3, axis=1)
- compare_graph(mod[mod.entry_func], relay_sym)
+ relay_mod = relay_compose(relay, indices_or_sections=3, axis=1)
+ compare_graph(mod, relay_mod)
if __name__ == "__main__":
with autotvm.tophub.context(target):
mod = relay.Module.from_expr(N)
mod = transform.AlterOpLayout()(mod)
- O = mod[mod.entry_func]
+ O = mod["main"]
# graph should differ
assert not relay.analysis.alpha_equal(N, O)
from tvm.relay import testing
-def benchmark_execution(net,
+def benchmark_execution(mod,
params,
measure=False,
data_shape=(1, 3, 224, 224),
out_shape=(1, 1000),
dtype='float32'):
- def get_tvm_output(net, data, params, target, ctx, dtype='float32'):
+ def get_tvm_output(mod, data, params, target, ctx, dtype='float32'):
with relay.build_config(opt_level=1):
- graph, lib, params = relay.build(net, target, params=params)
+ graph, lib, params = relay.build(mod, target, params=params)
m = graph_runtime.create(graph, lib, ctx)
# set inputs
return out.asnumpy()
- def get_tvm_vm_output(net, data, params, target, ctx, dtype='float32'):
- ex = relay.create_executor('vm', mod=relay.Module(), ctx=ctx)
- result = ex.evaluate(net)(data, **params)
+ def get_tvm_vm_output(mod, data, params, target, ctx, dtype='float32'):
+ ex = relay.create_executor('vm', mod=mod, ctx=ctx)
+ result = ex.evaluate()(data, **params)
return result.asnumpy().astype(dtype)
# random input
target = "llvm"
ctx = tvm.cpu(0)
- tvm_out = get_tvm_output(net, tvm.nd.array(data.astype(dtype)), params,
+ tvm_out = get_tvm_output(mod, tvm.nd.array(data.astype(dtype)), params,
target, ctx, dtype)
- vm_out = get_tvm_vm_output(net, tvm.nd.array(data.astype(dtype)), params,
+ vm_out = get_tvm_vm_output(mod, tvm.nd.array(data.astype(dtype)), params,
target, ctx, dtype)
tvm.testing.assert_allclose(vm_out, tvm_out, rtol=1e-5, atol=1e-5)
def test_mlp():
- image_shape = (1, 28, 28)
- net, params = testing.mlp.get_workload(1)
- benchmark_execution(net, params, data_shape=image_shape, out_shape=(1, 10))
+ image_shape = (1, 1, 28, 28)
+ mod, params = testing.mlp.get_workload(1)
+ benchmark_execution(mod, params, data_shape=image_shape, out_shape=(1, 10))
def test_vgg():
for n in [11, 16]:
- net, params = testing.vgg.get_workload(1, num_layers=n)
- benchmark_execution(net, params)
+ mod, params = testing.vgg.get_workload(1, num_layers=n)
+ benchmark_execution(mod, params)
def test_resnet():
for n in [18, 50]:
- net, params = testing.resnet.get_workload(batch_size=1, num_layers=n)
- benchmark_execution(net, params, True)
+ mod, params = testing.resnet.get_workload(batch_size=1, num_layers=n)
+ benchmark_execution(mod, params, True)
def test_squeezenet():
for version in ['1.0', '1.1']:
- net, params = testing.squeezenet.get_workload(version=version)
- benchmark_execution(net, params)
+ mod, params = testing.squeezenet.get_workload(version=version)
+ benchmark_execution(mod, params)
def test_inception_v3():
- image_shape = (3, 299, 299)
- net, params = testing.inception_v3.get_workload(image_shape=image_shape)
- benchmark_execution(net, params, data_shape=image_shape)
+ image_shape = (1, 3, 299, 299)
+ mod, params = testing.inception_v3.get_workload(image_shape=image_shape)
+ benchmark_execution(mod, params, data_shape=image_shape)
def test_dqn():
- image_shape = (4, 84, 84)
- net, params = testing.dqn.get_workload(
+ image_shape = (1, 4, 84, 84)
+ mod, params = testing.dqn.get_workload(
batch_size=1, image_shape=image_shape)
- benchmark_execution(net, params, data_shape=image_shape, out_shape=(1, 18))
+ benchmark_execution(mod, params, data_shape=image_shape, out_shape=(1, 18))
def test_dcgan():
image_shape = (1, 100)
- net, params = testing.dcgan.get_workload(batch_size=1)
- benchmark_execution(net, params, data_shape=image_shape)
+ mod, params = testing.dcgan.get_workload(batch_size=1)
+ benchmark_execution(mod, params, data_shape=image_shape)
def test_mobilenet():
- net, params = testing.mobilenet.get_workload(batch_size=1)
- benchmark_execution(net, params)
+ mod, params = testing.mobilenet.get_workload(batch_size=1)
+ benchmark_execution(mod, params)
def test_densenet():
- net, params = testing.densenet.get_workload(batch_size=1)
- benchmark_execution(net, params)
+ mod, params = testing.densenet.get_workload(batch_size=1)
+ benchmark_execution(mod, params)
if __name__ == '__main__':
input_shape = (batch_size, 3, 224, 224)
if name == 'resnet-18':
- net, params = relay.testing.resnet.get_workload(num_layers=18, batch_size=batch_size)
+ mod, params = relay.testing.resnet.get_workload(num_layers=18, batch_size=batch_size)
elif name == 'mobilenet':
- net, params = relay.testing.mobilenet.get_workload(batch_size=batch_size)
+ mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size)
elif name == 'dcgan':
- net, params = relay.testing.dcgan.get_workload(batch_size=batch_size)
+ mod, params = relay.testing.dcgan.get_workload(batch_size=batch_size)
input_shape = (batch_size, 100)
else:
raise ValueError("Unsupported network: " + name)
- return net, params, input_shape
+ return mod, params, input_shape
def test_task_extraction():
target = 'llvm'
- net, params, input_shape = get_network('resnet-18', batch_size=1)
- tasks = autotvm.task.extract_from_program(net, target=target,
+ mod, params, input_shape = get_network('resnet-18', batch_size=1)
+ tasks = autotvm.task.extract_from_program(mod["main"], target=target,
params=params,
ops=(relay.op.nn.conv2d,))
assert len(tasks) == 12
- net, params, input_shape = get_network('resnet-18', batch_size=1)
- tasks = autotvm.task.extract_from_program(net, target=target,
- params=params,
- ops=(relay.op.nn.dense,))
+ mod, params, input_shape = get_network('resnet-18', batch_size=1)
+ tasks = autotvm.task.extract_from_program(mod["main"], target=target,
+ params=params,
+ ops=(relay.op.nn.dense,))
assert len(tasks) == 1
- net, params, input_shape = get_network('resnet-18', batch_size=1)
- tasks = autotvm.task.extract_from_program(net, target=target,
- params=params,
- ops=(relay.op.nn.conv2d, relay.op.nn.dense))
+ mod, params, input_shape = get_network('resnet-18', batch_size=1)
+ tasks = autotvm.task.extract_from_program(mod["main"], target=target,
+ params=params,
+ ops=(relay.op.nn.conv2d, relay.op.nn.dense))
assert len(tasks) == 13
- net, params, input_shape = get_network('mobilenet', batch_size=1)
- tasks = autotvm.task.extract_from_program(net, target=target,
- params=params,
- ops=(relay.op.nn.conv2d, relay.op.nn.dense))
+ mod, params, input_shape = get_network('mobilenet', batch_size=1)
+ tasks = autotvm.task.extract_from_program(mod["main"], target=target,
+ params=params,
+ ops=(relay.op.nn.conv2d, relay.op.nn.dense))
assert len(tasks) == 20
- net, params, input_shape = get_network('dcgan', batch_size=1)
- tasks = autotvm.task.extract_from_program(net, target=target,
- params=params,
- ops=(relay.op.nn.conv2d_transpose,))
+ mod, params, input_shape = get_network('dcgan', batch_size=1)
+ tasks = autotvm.task.extract_from_program(mod["main"], target=target,
+ params=params,
+ ops=(relay.op.nn.conv2d_transpose,))
assert len(tasks) == 4
if __name__ == '__main__':
f = relay.Function([x], z)
mod = relay.Module.from_expr(f)
mod = relay.transform.InferType()(mod)
- return mod[mod.entry_func]
+ return mod["main"]
z1 = engine.lower(get_func((10,)), "llvm")
z2 = engine.lower(get_func((10,)), "llvm")
z3 = engine.lower(get_func(()), "llvm")
func = relay.Function([x, y], z)
mod = relay.Module.from_expr(func)
mod = relay.transform.FuseOps(0)(mod)
- func = mod[mod.entry_func]
+ func = mod["main"]
smap = relay.backend._backend.GraphPlanMemory(func)
storage_ids = set()
device_types = set()
fn = relay.Function([x], relay.expr.TupleGetItem(x, 0))
mod = relay.Module({})
- gv = relay.GlobalVar('fn')
+ gv = relay.GlobalVar('main')
mod[gv] = fn
- mod.entry_func = gv
mod = relay.transform.InferType()(mod)
ctx = tvm.cpu()
try:
mod = relay.Module.from_expr(expr)
mod = relay.transform.InferType()(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
expr = entry if isinstance(expr, relay.Function) else entry.body
assert False
except tvm.TVMError as err:
func = relay.Function([x], x + x)
mod = relay.Module.from_expr(gradient(func))
mod = relay.transform.InferType()(mod)
- back_func = mod[mod.entry_func]
+ back_func = mod["main"]
feats = detect_feature(back_func)
assert feats == set([
Feature.fVar,
def run_infer_type(expr):
mod = relay.Module.from_expr(expr)
mod = relay.transform.InferType()(mod)
- return mod[mod.entry_func]
+ return mod["main"]
def sigmoid(x):
def run_infer_type(expr):
mod = relay.Module.from_expr(expr)
mod = transform.InferType()(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def sigmoid(x):
def run_infer_type(expr):
mod = relay.Module.from_expr(expr)
mod = transform.InferType()(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def test_collapse_sum_like():
def run_infer_type(expr):
mod = relay.Module.from_expr(expr)
mod = transform.InferType()(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def test_conv2d_infer_type():
def run_infer_type(expr):
mod = relay.Module.from_expr(expr)
mod = transform.InferType()(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def test_zeros_ones():
def run_infer_type(expr):
mod = relay.Module.from_expr(expr)
mod = transform.InferType()(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def test_binary_op():
def run_infer_type(expr):
mod = relay.Module.from_expr(expr)
mod = transform.InferType()(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def test_resize_infer_type():
seq = transform.Sequential(passes)
with transform.PassContext(opt_level=3):
mod = seq(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
seq = transform.Sequential(passes)
with transform.PassContext(opt_level=3):
mod = seq(mod)
- return mod[mod.entry_func]
+ return mod["main"]
def test_redundant_annotation():
_transform.InferType()])
with _transform.PassContext(opt_level=3):
mod = seq(mod)
- y = mod[mod.entry_func.name_hint]
+ y = mod["main"]
y_expected = expected(data, conv_weight, bias1, bias2)
gv = relay.GlobalVar("expected")
mod[gv] = y_expected
def run_combine_parallel(expr, min_num_branches=3):
mod = relay.Module.from_expr(expr)
mod = transform.CombineParallelConv2D(min_num_branches)(mod)
- return mod[mod.entry_func]
+ return mod["main"]
def run_opt_pass(expr, opt_pass):
assert isinstance(opt_pass, transform.Pass)
mod = relay.Module.from_expr(expr)
mod = opt_pass(mod)
- return mod[mod.entry_func]
+ return mod["main"]
def test_combine_parallel_conv2d():
assert isinstance(opt_pass, transform.Pass)
mod = relay.Module.from_expr(expr)
mod = opt_pass(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
assert isinstance(opt_pass, transform.Pass)
mod = relay.Module.from_expr(expr)
mod = opt_pass(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
with _transform.PassContext(opt_level=3):
mod = seq(mod)
- got = mod[mod.entry_func.name_hint]
+ got = mod["main"]
y = relay.var('y', 'int32')
expected = relay.Function([y], orig(y))
mod = relay.Module.from_expr(expr)
mod = opt_pass(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
assert isinstance(opt_pass, transform.Pass)
mod = relay.Module.from_expr(expr)
mod = opt_pass(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
m = fuse2(relay.Module.from_expr(orig))
relay.build(m, 'llvm')
after = run_opt_pass(expected(x), transform.InferType())
- assert relay.analysis.alpha_equal(m[m.entry_func], after)
+ assert relay.analysis.alpha_equal(m["main"], after)
def test_tuple_consecutive():
m = fuse2(relay.Module.from_expr(orig))
relay.build(m, 'llvm')
after = run_opt_pass(expected(dshape), transform.InferType())
- assert relay.analysis.alpha_equal(m[m.entry_func], after)
+ assert relay.analysis.alpha_equal(m["main"], after)
def test_inception_like():
m = fuse2(relay.Module.from_expr(orig))
relay.build(m, 'llvm')
after = run_opt_pass(expected(dshape), transform.InferType())
- assert relay.analysis.alpha_equal(m[m.entry_func], after)
+ assert relay.analysis.alpha_equal(m["main"], after)
def test_fuse_parallel_injective():
i = relay.var("i", t)
func = relay.Function([i], p.nat_iterate(double, make_nat_expr(p, 3))(i))
func = gradient(func, mod=mod)
- mod[mod.entry_func] = func
+ mod["main"] = func
m = transform.InferType()(mod)
- back_func = m[m.entry_func]
+ back_func = m["main"]
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
i_nd = rand(dtype, *shape)
ex = create_executor(mod=mod)
assert isinstance(opt_pass, transform.Pass)
mod = relay.Module.from_expr(expr)
mod = opt_pass(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def run_infer_type(expr):
    """Type-infer `expr` by wrapping it in a fresh module.

    Parameters
    ----------
    expr : relay.Expr or relay.Function
        The expression to type check.

    Returns
    -------
    relay.Expr or relay.Function
        The type-checked function when `expr` was a Function, otherwise
        the type-checked body of the wrapping "main" function.
    """
    mod = relay.Module.from_expr(expr)
    mod = _transform.InferType()(mod)
    # from_expr registers the expression under the global "main" function.
    entry = mod["main"]
    return entry if isinstance(expr, relay.Function) else entry.body
seq = transform.Sequential(passes)
with transform.PassContext(opt_level=3):
mod = seq(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
expr = gradient(expr)
if mod:
assert isinstance(expr, Function)
- mod[mod.entry_func] = expr
+ mod["main"] = expr
seq = transform.Sequential(passes)
mod = seq(mod)
- return mod[mod.entry_func]
+ return mod["main"]
return run_opt_pass(expr, passes)
orig = p.map(f, p.cons(const(1), p.cons(const(2), p.cons(const(3), p.nil()))))
expected = p.cons((const(1)), p.cons((const(2)), p.cons((const(3)), p.nil())))
expected = Function([], expected)
- mod[mod.entry_func] = expected
- expected = mod[mod.entry_func]
+ mod["main"] = expected
+ expected = mod["main"]
orig = Function([], orig)
res = dcpe(orig, mod=mod)
assert alpha_equal(res.body, expected.body)
loop = GlobalVar("loop")
mod[loop] = Function([x], loop(x), t, [t])
expected = Call(loop, [const(1)])
- mod[mod.entry_func] = Function([], expected)
- expected = mod[mod.entry_func].body
+ mod["main"] = Function([], expected)
+ expected = mod["main"].body
call = Function([], loop(const(1)))
res = dcpe(call, mod=mod)
assert alpha_equal(res.body, expected)
def run_infer_type(expr):
    """Run the InferType pass on `expr` via a temporary module.

    Parameters
    ----------
    expr : relay.Expr or relay.Function
        The expression to type check.

    Returns
    -------
    relay.Expr or relay.Function
        The checked "main" function when `expr` was a Function,
        otherwise its body (the original expression, annotated).
    """
    mod = relay.Module.from_expr(expr)
    mod = transform.InferType()(mod)
    # from_expr installs the expression as the module's "main" function.
    entry = mod["main"]
    return entry if isinstance(expr, relay.Function) else entry.body
seq = transform.Sequential(passes)
with transform.PassContext(opt_level=3):
mod = seq(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
net = relay.Function([cond,x,y], net)
mod = relay.Module.from_expr(net)
mod = relay.transform.ToANormalForm()(mod)
- mod[mod.entry_func] = relay.transform.gradient(mod[mod.entry_func], mode='higher_order')
+ mod["main"] = relay.transform.gradient(mod["main"], mode='higher_order')
mod = relay.transform.ToANormalForm()(mod)
double = relay.Function([x], x + x)
i = relay.var("i", t)
func = relay.Function([i], p.nat_iterate(double, make_nat_expr(p, 3))(i))
- mod[mod.entry_func] = func
- mod[mod.entry_func] = to_cps(mod[mod.entry_func], mod=mod)
- mod[mod.entry_func] = un_cps(mod[mod.entry_func])
+ mod["main"] = func
+ mod["main"] = to_cps(mod["main"], mod=mod)
+ mod["main"] = un_cps(mod["main"])
ex = create_executor(mod=mod)
i_nd = rand(dtype, *shape)
- forward = ex.evaluate(mod.entry_func)(i_nd)
+ forward = ex.evaluate()(i_nd)
tvm.testing.assert_allclose(forward.asnumpy(), 8 * i_nd.asnumpy())
def run_opt_pass(expr, opt_pass):
    """Apply a single optimization pass to `expr` via a temporary module.

    Parameters
    ----------
    expr : relay.Expr or relay.Function
        The expression to transform.
    opt_pass : transform.Pass
        The pass to run over the wrapping module.

    Returns
    -------
    relay.Expr or relay.Function
        The transformed "main" function when `expr` was a Function,
        otherwise its body.
    """
    mod = relay.Module.from_expr(expr)
    mod = opt_pass(mod)
    # from_expr registers the expression as the module's "main" function.
    entry = mod["main"]
    return entry if isinstance(expr, relay.Function) else entry.body
if not mod:
mod = relay.Module.from_expr(expr)
mod = transform.InferType()(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
else:
if isinstance(expr, relay.GlobalVar):
func = expr
if not isinstance(expr, relay.Function):
func = relay.Function(analysis.free_vars(expr), expr)
- mod[mod.entry_func] = func
+ mod["main"] = func
gv = "main"
mod = transform.InferType()(mod)
def test_global_var_recursion():
mod = relay.Module({})
- gv = relay.GlobalVar("foo")
+ gv = relay.GlobalVar("main")
x = relay.var('x', shape=[])
tt = relay.scalar_type('float32')
b = relay.Var("b", t)
mod = relay.Module.from_expr(make_id(b))
mod = transform.InferType()(mod)
- inferred = mod[mod.entry_func].body
+ inferred = mod["main"].body
assert inferred.checked_type == relay.TupleType([t, t])
make_id = relay.Var("make_id", relay.FuncType([b], id_type(b), [b]))
t = relay.scalar_type("float32")
b = relay.Var("b", t)
- mod[mod.entry_func] = relay.Function([], make_id(b))
+ mod["main"] = relay.Function([], make_id(b))
mod = transform.InferType()(mod)
- assert mod[mod.entry_func].body.checked_type == id_type(t)
+ assert mod["main"].body.checked_type == id_type(t)
if __name__ == "__main__":
mod[sum_up] = func
i_data = np.array(0, dtype='int32')
iarg = relay.var('i', shape=[], dtype='int32')
- mod[mod.entry_func] = relay.Function([iarg], sum_up(iarg))
+ mod["main"] = relay.Function([iarg], sum_up(iarg))
result = veval(mod, i_data)
tvm.testing.assert_allclose(result.asnumpy(), i_data)
mod[sum_up] = func
i_data = np.array(0, dtype='int32')
iarg = relay.var('i', shape=[], dtype='int32')
- mod[mod.entry_func] = relay.Function([iarg], sum_up(iarg))
+ mod["main"] = relay.Function([iarg], sum_up(iarg))
result = veval(mod, i_data)
tvm.testing.assert_allclose(result.asnumpy(), i_data)
accum_data = np.array(0, dtype='int32')
iarg = relay.var('i', shape=[], dtype='int32')
aarg = relay.var('accum', shape=[], dtype='int32')
- mod[mod.entry_func] = relay.Function([iarg, aarg], sum_up(iarg, aarg))
+ mod["main"] = relay.Function([iarg, aarg], sum_up(iarg, aarg))
result = veval(mod, i_data, accum_data)
tvm.testing.assert_allclose(result.asnumpy(), sum(range(1, loop_bound + 1)))
one4 = cons(relay.const(3), one3)
f = relay.Function([], one4)
- mod[mod.entry_func] = f
+ mod["main"] = f
result = veval(mod)()
obj = to_list(result)
mod[add_one] = add_one_func
f = relay.Function([y], add_two_body)
- mod[mod.entry_func] = f
+ mod["main"] = f
x_data = np.array(np.random.rand()).astype('float32')
result = veval(mod)(x_data)
conv2 = relay.nn.conv2d(conv1, w2, channels=32, kernel_size=(3, 3), padding=(1, 1))
out = relay.add(conv1, conv2)
net = relay.Function(relay.analysis.free_vars(out), out)
- net, params = relay.testing.create_workload(net)
- tasks = autotvm.task.extract_from_program(net,
+ mod, params = relay.testing.create_workload(net)
+ tasks = autotvm.task.extract_from_program(mod["main"],
target=target,
params=params,
ops=(relay.op.nn.conv2d,))
g, records, ltf_records, ltf_keys, tasks = _create_data(target, dshape, dtype, layout)
mod = relay.module.Module()
- mod[mod.entry_func] = g
+ mod["main"] = g
costs = [0.02, 0.02, 0.045]
config_list = []
cfg_dict = {"i": -1,
def test_expr2graph():
- net, _ = resnet.get_workload(num_layers=50, batch_size=1)
+ mod, _ = resnet.get_workload(num_layers=50, batch_size=1)
node_dict = {}
node_list = []
target_ops = ["conv2d"]
op_name_list.append("Tuple")
else:
op_name_list.append("null")
- relay.analysis.post_order_visit(net, _count_node)
+ relay.analysis.post_order_visit(mod["main"], _count_node)
- expr2graph(net, target_ops, node_dict, node_list)
+ expr2graph(mod["main"], target_ops, node_dict, node_list)
for i, item in enumerate(zip(op_name_list, node_list)):
op_name, node = item
assert op_name == node["op"], "%dth Node operator mismatch: expecting %s but got %s" \
if "resnet" in name:
n_layer = int(name.split('-')[1])
- net, params = relay.testing.resnet.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
+ mod, params = relay.testing.resnet.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
elif "vgg" in name:
n_layer = int(name.split('-')[1])
- net, params = relay.testing.vgg.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
+ mod, params = relay.testing.vgg.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
elif name == 'mobilenet':
- net, params = relay.testing.mobilenet.get_workload(batch_size=batch_size)
+ mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size)
elif name == 'squeezenet_v1.1':
- net, params = relay.testing.squeezenet.get_workload(batch_size=batch_size, version='1.1', dtype=dtype)
+ mod, params = relay.testing.squeezenet.get_workload(batch_size=batch_size, version='1.1', dtype=dtype)
elif name == 'inception_v3':
input_shape = (1, 3, 299, 299)
- net, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
+ mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif name == 'mxnet':
# an example for mxnet model
from mxnet.gluon.model_zoo.vision import get_model
block = get_model('resnet18_v1', pretrained=True)
mod, params = relay.frontend.from_mxnet(block, shape={'data': input_shape}, dtype=dtype)
- net = mod[mod.entry_func]
+ net = mod["main"]
net = relay.Function(net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs)
+ mod = relay.Module.from_expr(net)
else:
raise ValueError("Unsupported network: " + name)
- return net, params, input_shape, output_shape
+ return mod, params, input_shape, output_shape
#################################################################
def tune_and_evaluate(tuning_opt):
# extract workloads from relay program
print("Extract tasks...")
- net, params, input_shape, _ = get_network(network, batch_size=1)
- tasks = autotvm.task.extract_from_program(net, target=target,
- params=params,
- ops=(relay.op.nn.conv2d,))
+ mod, params, input_shape, _ = get_network(network, batch_size=1)
+ tasks = autotvm.task.extract_from_program(mod["main"], target=target,
+ params=params,
+ ops=(relay.op.nn.conv2d,))
# run tuning tasks
print("Tuning...")
print("Compile...")
with relay.build_config(opt_level=3):
graph, lib, params = relay.build_module.build(
- net, target=target, params=params)
+ mod, target=target, params=params)
# export library
tmp = tempdir()
if "resnet" in name:
n_layer = int(name.split('-')[1])
- net, params = relay.testing.resnet.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
+ mod, params = relay.testing.resnet.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
elif "vgg" in name:
n_layer = int(name.split('-')[1])
- net, params = relay.testing.vgg.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
+ mod, params = relay.testing.vgg.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
elif name == 'mobilenet':
- net, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)
+ mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)
elif name == 'squeezenet_v1.1':
- net, params = relay.testing.squeezenet.get_workload(batch_size=batch_size, version='1.1', dtype=dtype)
+ mod, params = relay.testing.squeezenet.get_workload(batch_size=batch_size, version='1.1', dtype=dtype)
elif name == 'inception_v3':
input_shape = (1, 3, 299, 299)
- net, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
+ mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif name == 'mxnet':
# an example for mxnet model
from mxnet.gluon.model_zoo.vision import get_model
block = get_model('resnet18_v1', pretrained=True)
mod, params = relay.frontend.from_mxnet(block, shape={'data': input_shape}, dtype=dtype)
- net = mod[mod.entry_func]
+ net = mod["main"]
net = relay.Function(net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs)
+ mod = relay.Module.from_expr(net)
else:
raise ValueError("Unsupported network: " + name)
- return net, params, input_shape, output_shape
+ return mod, params, input_shape, output_shape
###########################################
# Set Tuning Options
def tune_and_evaluate(tuning_opt):
# extract workloads from relay program
print("Extract tasks...")
- net, params, input_shape, out_shape = get_network(network, batch_size=1)
- tasks = autotvm.task.extract_from_program(net, target=target,
- params=params, ops=(relay.op.nn.conv2d,))
+ mod, params, input_shape, out_shape = get_network(network, batch_size=1)
+ tasks = autotvm.task.extract_from_program(mod["main"], target=target,
+ params=params, ops=(relay.op.nn.conv2d,))
# run tuning tasks
print("Tuning...")
print("Compile...")
with relay.build_config(opt_level=3):
graph, lib, params = relay.build_module.build(
- net, target=target, params=params)
+ mod, target=target, params=params)
# export library
tmp = tempdir()
if "resnet" in name:
n_layer = int(name.split('-')[1])
- net, params = relay.testing.resnet.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
+ mod, params = relay.testing.resnet.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
elif "vgg" in name:
n_layer = int(name.split('-')[1])
- net, params = relay.testing.vgg.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
+ mod, params = relay.testing.vgg.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
elif name == 'mobilenet':
- net, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)
+ mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)
elif name == 'squeezenet_v1.1':
- net, params = relay.testing.squeezenet.get_workload(batch_size=batch_size, version='1.1', dtype=dtype)
+ mod, params = relay.testing.squeezenet.get_workload(batch_size=batch_size, version='1.1', dtype=dtype)
elif name == 'inception_v3':
input_shape = (1, 3, 299, 299)
- net, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
+ mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif name == 'mxnet':
# an example for mxnet model
from mxnet.gluon.model_zoo.vision import get_model
block = get_model('resnet18_v1', pretrained=True)
mod, params = relay.frontend.from_mxnet(block, shape={'data': input_shape}, dtype=dtype)
- net = mod[mod.entry_func]
+ net = mod["main"]
net = relay.Function(net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs)
+ mod = relay.Module.from_expr(net)
else:
raise ValueError("Unsupported network: " + name)
- return net, params, input_shape, output_shape
+ return mod, params, input_shape, output_shape
#################################################################
def tune_and_evaluate(tuning_opt):
# extract workloads from relay program
print("Extract tasks...")
- net, params, input_shape, _ = get_network(network, batch_size=1)
- tasks = autotvm.task.extract_from_program(net, target=target, target_host=target_host,
+ mod, params, input_shape, _ = get_network(network, batch_size=1)
+ tasks = autotvm.task.extract_from_program(mod["main"],
+ target=target,
+ target_host=target_host,
params=params, ops=(relay.op.nn.conv2d,))
# run tuning tasks
print("Compile...")
with relay.build_config(opt_level=3):
graph, lib, params = relay.build_module.build(
- net, target=target, params=params, target_host=target_host)
+ mod, target=target, params=params, target_host=target_host)
# export library
tmp = tempdir()
if use_android:
if "resnet" in name:
n_layer = int(name.split('-')[1])
- net, params = relay.testing.resnet.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
+ mod, params = relay.testing.resnet.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
elif "vgg" in name:
n_layer = int(name.split('-')[1])
- net, params = relay.testing.vgg.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
+ mod, params = relay.testing.vgg.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
elif name == 'mobilenet':
- net, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)
+ mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)
elif name == 'squeezenet_v1.1':
- net, params = relay.testing.squeezenet.get_workload(batch_size=batch_size, version='1.1', dtype=dtype)
+ mod, params = relay.testing.squeezenet.get_workload(batch_size=batch_size, version='1.1', dtype=dtype)
elif name == 'inception_v3':
input_shape = (1, 3, 299, 299)
- net, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
+ mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif name == 'mxnet':
# an example for mxnet model
from mxnet.gluon.model_zoo.vision import get_model
block = get_model('resnet18_v1', pretrained=True)
mod, params = relay.frontend.from_mxnet(block, shape={'data': input_shape}, dtype=dtype)
- net = mod[mod.entry_func]
+ net = mod["main"]
net = relay.Function(net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs)
+ mod = relay.Module.from_expr(net)
else:
raise ValueError("Unsupported network: " + name)
- return net, params, input_shape, output_shape
+ return mod, params, input_shape, output_shape
# Replace "llvm" with the correct target of your CPU.
# For example, for AWS EC2 c5 instance with Intel Xeon
def tune_and_evaluate(tuning_opt):
# extract workloads from relay program
print("Extract tasks...")
- net, params, data_shape, out_shape = get_network(model_name, batch_size)
- tasks = autotvm.task.extract_from_program(net, target=target,
+ mod, params, data_shape, out_shape = get_network(model_name, batch_size)
+ tasks = autotvm.task.extract_from_program(mod["main"], target=target,
params=params, ops=(relay.op.nn.conv2d,))
# run tuning tasks
print("Tuning...")
tune_kernels(tasks, **tuning_opt)
- tune_graph(net, data_shape, log_file, graph_opt_sch_file)
+ tune_graph(mod["main"], data_shape, log_file, graph_opt_sch_file)
# compile kernels with graph-level best records
with autotvm.apply_graph_best(graph_opt_sch_file):
print("Compile...")
with relay.build_config(opt_level=3):
graph, lib, params = relay.build_module.build(
- net, target=target, params=params)
+ mod, target=target, params=params)
# upload parameters to device
ctx = tvm.cpu()
shape_dict = {'data': x.shape}
mod, params = relay.frontend.from_mxnet(block, shape_dict)
# we want a probability so add a softmax operator
-func = mod[mod.entry_func]
+func = mod["main"]
func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs)
######################################################################
shape_dict = {'data': x.shape}
mod, params = relay.frontend.from_mxnet(block, shape_dict)
## we want a probability so add a softmax operator
-func = mod[mod.entry_func]
+func = mod["main"]
func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs)
######################################################################
data_shape = (batch_size,) + image_shape
out_shape = (batch_size, num_class)
-net, params = relay.testing.resnet.get_workload(
+mod, params = relay.testing.resnet.get_workload(
num_layers=18, batch_size=batch_size, image_shape=image_shape)
# set show_meta_data=True if you want to show meta data
-print(net.astext(show_meta_data=False))
+print(mod.astext(show_meta_data=False))
######################################################################
# Compilation
target = tvm.target.cuda()
with relay.build_config(opt_level=opt_level):
graph, lib, params = relay.build_module.build(
- net, target, params=params)
+ mod, target, params=params)
#####################################################################
# Run the generate library
assert isinstance(opt_pass, transform.Pass)
mod = relay.Module.from_expr(expr)
mod = opt_pass(mod)
- entry = mod[mod.entry_func]
+ entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def _to_shape(shape):
# Perform quantization in Relay
with relay.quantize.qconfig(global_scale=8.0,
skip_conv_layers=[0]):
- relay_prog = relay.quantize.quantize(mod[mod.entry_func], params=params)
+ relay_prog = relay.quantize.quantize(mod["main"], params=params)
# Perform graph packing and constant folding for VTA target
if target.device_name == "vta":
# Perform quantization in Relay
with relay.quantize.qconfig(global_scale=8.0,
skip_conv_layers=[0]):
- relay_prog = relay.quantize.quantize(mod[mod.entry_func], params=params)
+ relay_prog = relay.quantize.quantize(mod["main"], params=params)
# Perform graph packing and constant folding for VTA target
if target.device_name == "vta":
# Perform quantization in Relay
with relay.quantize.qconfig(global_scale=8.0,
skip_conv_layers=[0]):
- relay_prog = relay.quantize.quantize(mod[mod.entry_func], params=params)
+ relay_prog = relay.quantize.quantize(mod["main"], params=params)
# Perform graph packing and constant folding for VTA target
if target.device_name == "vta":