// We don't support composite types for alias analysis yet.
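+ // (A "composite" annotation here would be an annotated container type; as
+ // an illustrative sketch, think of an output like `Tensor(a)[]`, which
+ // would need per-element alias tracking.)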
AT_ASSERT(formal->containedTypes().size() == 0);
- const auto& formalAlias = formal->set();
- auto outputAlias = formalToActual.at(formalAlias);
+ for (const auto& formalAlias : formal->sets()) {
+ // If we encounter an alias annotation that wasn't in the inputs:
+ if (!formalToActual.count(formalAlias)) {
+ // If this alias is not seen elsewhere and is the only annotation on
+ // the output, it's equivalent to being fresh:
+ // e.g. foo(Tensor(a) self) -> Tensor(b)
+ if (formal->sets().size() == 1) {
+ giveFreshAlias(actual);
+ }
+ // Or it is of the form `a|fresh`; we ignore the fresh part, taking the
+ // conservative assumption that the output must alias `a`, e.g.
+ // aten::cuda(Tensor(a) self) -> Tensor(a|b)
- // Record writes
- for (const auto& alias : outputAlias.sets()) {
- if (formal->isWrite()) {
- aliasToWrites_[alias].insert(node);
+ // Either way, there is no input alias set to propagate to the output.
+ continue;
}
- }
- addAlias(actual, outputAlias);
+ auto outputAlias = formalToActual.at(formalAlias);
+
+ // Record writes
+ for (const auto& alias : outputAlias.sets()) {
+ if (formal->isWrite()) {
+ aliasToWrites_[alias].insert(node);
+ }
+ }
+
+ addAlias(actual, outputAlias);
+ }
}
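+ // A sketch of how the loop above plays out (`foo` is a hypothetical
+ // operator used only for illustration):
+ //   foo(Tensor(a) self) -> Tensor(b): `b` never appears on an input and
+ //   is the only set on the output, so the output gets a fresh alias set.
+ //   aten::cuda(Tensor(a) self) -> Tensor(a|b): `b` hits the `continue`
+ //   above, while `a` maps through formalToActual, so the output is
+ //   conservatively recorded as aliasing `self`.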
// Keep the wildcard index up to date.
if (hasWildcardImpl(node)) {
}),
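+ // The `a|b` annotations below conservatively encode that `to()` returns
+ // `self` unchanged when no conversion is needed (and copy=False), and a
+ // freshly allocated tensor otherwise: the output either aliases `a` or is
+ // fresh (`b`).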
// Reference: parse_to_conversion() in python_arg_parsing.h
Operator(
- "aten::to(Tensor(a) self, Device? device, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a)",
+ "aten::to(Tensor(a) self, Device? device, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)",
[](const Node* node) -> Operation {
return [](Stack& stack) {
bool non_blocking;
};
}),
Operator(
- "aten::to(Tensor(a) self, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a)",
+ "aten::to(Tensor(a) self, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)",
[](const Node* node) -> Operation {
return [](Stack& stack) {
bool non_blocking;
};
}),
Operator(
- "aten::to(Tensor(a) self, bool non_blocking=False, bool copy=False) -> Tensor(a)",
+ "aten::to(Tensor(a) self, bool non_blocking=False, bool copy=False) -> Tensor(a|b)",
[](const Node* node) -> Operation {
return [](Stack& stack) {
at::Tensor self;
};
}),
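+ // Likewise, `cpu()` and `cuda()` return `self` if the tensor already lives
+ // on the target device, and a copy otherwise, hence `a|b` on the output.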
Operator(
- "aten::cpu(Tensor(a) self) -> Tensor(a)",
+ "aten::cpu(Tensor(a) self) -> Tensor(a|b)",
[](const Node* node) -> Operation {
return [](Stack& stack) {
at::Tensor a;
};
}),
Operator(
- "aten::cuda(Tensor(a) self) -> Tensor(a)",
+ "aten::cuda(Tensor(a) self) -> Tensor(a|b)",
[](const Node* node) -> Operation {
return [](Stack& stack) {
at::Tensor a;