Summary:
This adds `self` to the list of reserved words, sorts the lines, and prevents the tracer from naming values 'self' (which happens in torch/tensor.py)
Fixes #15240
Pull Request resolved: https://github.com/pytorch/pytorch/pull/15318
Differential Revision: D13498974
Pulled By: driazati
fbshipit-source-id: 488efb661476cdcdb8ecb9cb48942f02e3c1e611
producer_version: "0.4"
graph {
node {
- input: "self"
+ input: "0"
output: "1"
op_type: "ReduceL2"
attribute {
}
name: "torch-jit-export"
input {
- name: "self"
+ name: "0"
type {
tensor_type {
elem_type: 1
// they are keywords or namespaces used in the output
const static std::unordered_set<std::string> reserved_names = {
// identifiers in the environment while parsing
+ "_", // avoid the confusing unnamed _
"aten",
- "ops",
+ "attribute",
"CONSTANTS",
"fork",
- "attribute",
"getattr",
- "_", // avoid the confusing unnamed _
"inf",
"nan",
+ "ops",
+ "self",
// the python keywords
- "False",
- "None",
- "True",
"and",
"as",
"assert",
+ "async",
+ "await",
"break",
"class",
"continue",
"elif",
"else",
"except",
+ "False",
"finally",
"for",
"from",
"in",
"is",
"lambda",
+ "None",
"nonlocal",
"not",
"or",
"pass",
"raise",
"return",
+ "True",
"try",
"while",
"with",
for k, v in f_locals.items():
if isinstance(v, torch.Tensor) and var is v:
- return k
+ return k if k != 'self' else ''
for k, v in f_globals.items():
if isinstance(v, torch.Tensor) and var is v:
- return k
+ return k if k != 'self' else ''
return ''
return _get_interpreter_name_for_var