num_split = len(num_or_size_splits) if isinstance(num_or_size_splits, list) else num_or_size_splits
tf.split(in_data, num_or_size_splits, axis=axis)
- compare_tf_with_tvm([np_data], ['in_data:0'], [f'split:{n}' for n in range(num_split)])
+ compare_tf_with_tvm([np_data], ['in_data:0'], ['split:{0}'.format(n) for n in range(num_split)])
# and now test together with concat
tf.reset_default_graph()
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.unstack(in_data, axis=axis)
- compare_tf_with_tvm([np_data], ['in_data:0'], [f'unstack:{n}' for n in range(ip_shape[axis])])
+ compare_tf_with_tvm([np_data], ['in_data:0'], ['unstack:{0}'.format(n) for n in range(ip_shape[axis])])
tf.reset_default_graph()
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
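A quick sanity sketch (hypothetical num_split of 3, not part of the test) showing that the positional str.format calls above produce the same output-tensor names as the f-strings they replace:

    num_split = 3
    names = ['split:{0}'.format(n) for n in range(num_split)]
    assert names == ['split:0', 'split:1', 'split:2']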
def mk_global_var(self, name: str) -> expr.GlobalVar:
"""Create a new GlobalVar and add it to the GlobalVar scope."""
if name in self.global_vars:
- raise ParseError(f"duplicate global var \"{name}\"")
+ raise ParseError("duplicate global var \"{0}\"".format(name))
var = expr.GlobalVar(name)
self.global_vars[name] = var
return var
new_typ_name = self._type_expr_name(new_expr)
existing_typ_name = self._type_expr_name(self.global_type_vars[name])
raise ParseError(
- f"{new_typ_name} `{name}` conflicts with existing {existing_typ_name}")
+ "{0} `{1}` conflicts with existing {2}".format(new_typ_name,\
+ name, existing_typ_name))
def _type_expr_name(self, e):
if isinstance(e, adt.Constructor):
- return f"`{e.belong_to.var.name}` ADT constructor"
+ return "`{0}` ADT constructor".format(e.belong_to.var.name)
elif isinstance(e, ty.GlobalTypeVar):
if e.kind == ty.Kind.AdtHandle:
- return f"ADT definition"
+ return "ADT definition"
return "function definition"
def visitProjection(self, ctx):
raise ParseError("unrecognized BOOL_LIT: `{}`".format(node_text))
if node_type == RelayLexer.QUOTED_STRING:
return literal_eval(node_text)
- raise ParseError(f"unhandled terminal \"{node_text}\" of type `{node_type}`")
+ raise ParseError("unhandled terminal \"{0}\" of type `{1}`".format(node_text, node_type))
def visitGeneralIdent(self, ctx):
name = ctx.getText()
var_name = ctx.CNAME().getText()
global_var = self.global_vars.get(var_name, None)
if global_var is None:
- raise ParseError(f"unbound global var `{var_name}`")
+ raise ParseError("unbound global var `{0}`".format(var_name))
return global_var
def visitLocalVar(self, ctx):
var_name = ctx.CNAME().getText()
local_var = lookup(self.var_scopes, var_name)
if local_var is None:
- raise ParseError(f"unbound local var `{var_name}`")
+ raise ParseError("unbound local var `{0}`".format(var_name))
return local_var
def visitGraphVar(self, ctx):
elif match_type == "match?":
complete_match = False
else:
- raise RuntimeError(f"unknown match type {match_type}")
+ raise RuntimeError("unknown match type {0}".format(match_type))
match_data = self.visit(ctx.expr())
match_clauses = ctx.matchClauseList()
for field_ty in typ.fields:
_unpack(field_ty, out)
else:
- raise Exception(f"unsupported Relay type: {typ}")
+ raise Exception("unsupported Relay type: {0}".format(typ))
output = []
_unpack(self.typ, output)
_pack(value[i], field_ty, tuple_out)
out.append(expr.Tuple(tuple_out))
else:
- raise Exception(f"unsupported Relay type: {typ}")
+ raise Exception("unsupported Relay type: {0}".format(typ))
if len(seq) == 1:
return seq[0]
size = self.compute_storage(tensor_type)
alignment = self.compute_alignment(tensor_type.dtype)
dtype = tensor_type.dtype
- sto = scope.let(f"storage_{i}", self.alloc_storage(
+ sto = scope.let("storage_{0}".format(i), self.alloc_storage(
size, alignment, dtype))
# TODO(@jroesch): There is a bug with typing based on the constant shape.
tensor = self.alloc_tensor(sto, shape, dtype, tensor_type.shape)
- return scope.let(f"tensor_{i}", tensor)
+ return scope.let("tensor_{0}".format(i), tensor)
def visit_let(self, let):
scope = ScopeBuilder()
if state == 2:
sh_of = self.visit(self.shape_of(arg))
shape_func_ins.append(
- scope.let(f"in_shape_{i}", sh_of))
+ scope.let("in_shape_{0}".format(i), sh_of))
is_inputs.append(0)
# Pass Inputs
elif state == 1:
new_arg = self.visit(arg)
shape_func_ins.append(
- scope.let(f"in_shape_{i}", new_arg))
+ scope.let("in_shape_{0}".format(i), new_arg))
is_inputs.append(1)
# TODO(@jroesch): handle 3rd case
else:
for i, out in enumerate(cfunc.outputs):
tt = ty.TensorType(out.shape, out.dtype)
alloc = self.make_static_allocation(scope, tt, i)
- alloc = scope.let(f"shape_func_out_{i}", alloc)
+ alloc = scope.let("shape_func_out_{0}".format(i), alloc)
out_shapes.append(alloc)
shape_call = self.shape_func(
size = self.compute_storage_in_relay(
out_shape, out_type.dtype)
alignment = self.compute_alignment(out_type.dtype)
- sto = scope.let(f"storage_{i}", self.alloc_storage(
+ sto = scope.let("storage_{i}".format(i=i), self.alloc_storage(
size, alignment, out_type.dtype))
storages.append(sto)
out_shape,
out_type.dtype,
out_type.shape)
- alloc = scope.let(f"out_{i}", alloc)
+ alloc = scope.let("out_{i}".format(i=i), alloc)
outs.append(alloc)
invoke = self.invoke_tvm(call.op, ins, expr.Tuple(outs))
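A minimal, hypothetical sketch (independent of this pass) of the ScopeBuilder pattern used above, where each intermediate is bound to an index-suffixed name built with str.format:

    from tvm import relay

    sb = relay.ScopeBuilder()
    x = relay.var("x", shape=(2, 2), dtype="float32")
    i = 0
    tensor = sb.let("tensor_{0}".format(i), relay.add(x, x))
    sb.ret(tensor)
    func = relay.Function([x], sb.get())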
def test_add(target_dir):
if not tvm.module.enabled("cuda"):
- print(f"skip {__file__} because cuda is not enabled...")
+ print("skip {__file__} because cuda is not enabled...".format(__file__=__file__))
return
n = tvm.var("n")
A = tvm.placeholder((n,), name='A')