}),
Operator(
"prim::Bool(Tensor a) -> bool",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- at::Tensor a;
- pop(stack, a);
- push(stack, a.item<int64_t>() != 0);
- return 0;
- };
+ [](Stack& stack) {
+ at::Tensor a;
+ pop(stack, a);
+ push(stack, a.item<int64_t>() != 0);
+ return 0;
}),
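None of the operator bodies touched in this section ever inspect the Node* they were handed, so each `[](const Node* node) -> Operation { return ...; }` creator collapses into the Operation it returns. A minimal, self-contained sketch of why the two shapes are interchangeable, using toy stand-ins (Stack, IValue, and Node here are placeholders, not the real JIT types; an Operator overload accepting an Operation directly is assumed to exist):

#include <functional>
#include <vector>

struct Node;
using IValue = int;                 // placeholder for torch::jit::IValue
using Stack = std::vector<IValue>;  // placeholder for the interpreter stack
using Operation = std::function<int(Stack&)>;
using OperationCreator = std::function<Operation(const Node*)>;

// Old style: a creator whose Node* argument is never used.
OperationCreator old_style = [](const Node*) -> Operation {
  return [](Stack& stack) {
    stack.push_back(42);
    return 0;
  };
};

// New style used throughout this diff: the stack function is registered
// directly, with no intermediate creator.
Operation new_style = [](Stack& stack) {
  stack.push_back(42);
  return 0;
};

int main() {
  Stack s;
  old_style(nullptr)(s);  // both calls leave the same value on the stack
  new_style(s);
  return s[0] == s[1] ? 0 : 1;  // exit code 0: the two styles agree
}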
Operator(
"prim::Bool(int a) -> bool",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- int64_t i;
- pop(stack, i);
- push(stack, (bool)i);
- return 0;
- };
+ [](Stack& stack) {
+ int64_t i;
+ pop(stack, i);
+ push(stack, (bool)i);
+ return 0;
}),
Operator(
"prim::Bool(float a) -> bool",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- double d;
- pop(stack, d);
- push(stack, (bool)d);
- return 0;
- };
+ [](Stack& stack) {
+ double d;
+ pop(stack, d);
+ push(stack, (bool)d);
+ return 0;
}),
Operator(
"prim::Int(Tensor a) -> int",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- at::Tensor a;
- pop(stack, a);
- push(stack, a.item<int64_t>());
- return 0;
- };
+ [](Stack& stack) {
+ at::Tensor a;
+ pop(stack, a);
+ push(stack, a.item<int64_t>());
+ return 0;
}),
Operator(
"prim::Float(Tensor a) -> float",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- at::Tensor a;
- pop(stack, a);
- push(stack, a.item<double>());
- return 0;
- };
+ [](Stack& stack) {
+ at::Tensor a;
+ pop(stack, a);
+ push(stack, a.item<double>());
+ return 0;
}),
Operator(
"prim::ImplicitTensorToNum(Tensor a) -> Scalar",
}),
Operator(
"prim::NumToTensor(Scalar a) -> Tensor",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- at::Scalar s;
- pop(stack, s);
- push(stack, autograd::make_variable(at::scalar_to_tensor(s)));
- return 0;
- };
+ [](Stack& stack) {
+ at::Scalar s;
+ pop(stack, s);
+ push(stack, autograd::make_variable(at::scalar_to_tensor(s)));
+ return 0;
}),
// note: this op needs to share a name with the Scalar -> Tensor conversion
// because all _to_tensor conversions have to have the same operator name
Operator(
"prim::NumToTensor(bool a) -> Tensor",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- bool b;
- pop(stack, b);
- push(stack, autograd::make_variable(at::scalar_to_tensor(b)));
- return 0;
- };
+ [](Stack& stack) {
+ bool b;
+ pop(stack, b);
+ push(stack, autograd::make_variable(at::scalar_to_tensor(b)));
+ return 0;
}),
Operator(
"prim::Float(int a) -> float",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- int64_t i;
- pop(stack, i);
- push(stack, (float)i);
- return 0;
- };
+ [](Stack& stack) {
+ int64_t i;
+ pop(stack, i);
+ push(stack, (float)i);
+ return 0;
}),
Operator(
"prim::Int(float a) -> int",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- double d;
- pop(stack, d);
- push(stack, (int64_t)d);
- return 0;
- };
+ [](Stack& stack) {
+ double d;
+ pop(stack, d);
+ push(stack, (int64_t)d);
+ return 0;
}),
Operator(
"prim::Float(bool a) -> float",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- bool b;
- pop(stack, b);
- push(stack, (float)b);
- return 0;
- };
+ [](Stack& stack) {
+ bool b;
+ pop(stack, b);
+ push(stack, (float)b);
+ return 0;
}),
Operator(
"prim::Int(bool a) -> int",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- bool b;
- pop(stack, b);
- push(stack, (int)b);
- return 0;
- };
+ [](Stack& stack) {
+ bool b;
+ pop(stack, b);
+ push(stack, (int)b);
+ return 0;
}),
Operator(
"prim::Float(str a) -> float",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- auto s = pop(stack).toString();
- if (s->string() == "inf")
- push(stack, std::numeric_limits<double>::infinity());
- else if (s->string() == "-inf")
- push(stack, -std::numeric_limits<double>::infinity());
- else
- AT_ERROR(
- "Only 'inf' or '-inf' can be cast to a float, but got '",
- s->string(),
- "'");
- return 0;
- };
+ [](Stack& stack) {
+ auto s = pop(stack).toString();
+ if (s->string() == "inf")
+ push(stack, std::numeric_limits<double>::infinity());
+ else if (s->string() == "-inf")
+ push(stack, -std::numeric_limits<double>::infinity());
+ else
+ AT_ERROR(
+ "Only 'inf' or '-inf' can be cast to a float, but got '",
+ s->string(),
+ "'");
+ return 0;
}),
Operator(
"aten::device(str a) -> Device",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- push(stack, c10::Device(pop(stack).toStringRef()));
- return 0;
- };
+ [](Stack& stack) {
+ push(stack, c10::Device(pop(stack).toStringRef()));
+ return 0;
}),
// reference function parse_to_conversion in python_arg_parsing.h
Operator(
"aten::to(Tensor(a) self, Device? device, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- bool non_blocking;
- bool copy;
- pop(stack, non_blocking, copy);
- c10::optional<at::ScalarType> scalarType =
- pop(stack).toOptional<at::ScalarType>();
- c10::optional<c10::Device> device =
- pop(stack).toOptional<c10::Device>();
- at::Tensor self = pop(stack).toTensor();
- push(
- stack,
- to_dispatch(self, device, scalarType, non_blocking, copy));
- return 0;
- };
+ [](Stack& stack) {
+ bool non_blocking;
+ bool copy;
+ pop(stack, non_blocking, copy);
+ c10::optional<at::ScalarType> scalarType =
+ pop(stack).toOptional<at::ScalarType>();
+ c10::optional<c10::Device> device =
+ pop(stack).toOptional<c10::Device>();
+ at::Tensor self = pop(stack).toTensor();
+ push(
+ stack, to_dispatch(self, device, scalarType, non_blocking, copy));
+ return 0;
}),
Operator(
"aten::to(Tensor(a) self, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- bool non_blocking;
- bool copy;
- pop(stack, non_blocking, copy);
- c10::optional<at::ScalarType> scalarType =
- pop(stack).toOptional<at::ScalarType>();
- c10::optional<c10::Device> device = c10::nullopt;
- at::Tensor self = pop(stack).toTensor();
- push(
- stack,
- to_dispatch(self, device, scalarType, non_blocking, copy));
- return 0;
- };
+ [](Stack& stack) {
+ bool non_blocking;
+ bool copy;
+ pop(stack, non_blocking, copy);
+ c10::optional<at::ScalarType> scalarType =
+ pop(stack).toOptional<at::ScalarType>();
+ c10::optional<c10::Device> device = c10::nullopt;
+ at::Tensor self = pop(stack).toTensor();
+ push(
+ stack, to_dispatch(self, device, scalarType, non_blocking, copy));
+ return 0;
}),
Operator(
"aten::to(Tensor(a) self, bool non_blocking=False, bool copy=False) -> Tensor(a|b)",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- at::Tensor self;
- bool non_blocking;
- bool copy;
- pop(stack, self, non_blocking, copy);
- c10::optional<c10::Device> device = c10::nullopt;
- c10::optional<at::ScalarType> scalarType = c10::nullopt;
- push(
- stack,
- to_dispatch(self, device, scalarType, non_blocking, copy));
- return 0;
- };
+ [](Stack& stack) {
+ at::Tensor self;
+ bool non_blocking;
+ bool copy;
+ pop(stack, self, non_blocking, copy);
+ c10::optional<c10::Device> device = c10::nullopt;
+ c10::optional<at::ScalarType> scalarType = c10::nullopt;
+ push(
+ stack, to_dispatch(self, device, scalarType, non_blocking, copy));
+ return 0;
}),
Operator(
"aten::eq(Device a, Device b) -> bool",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- auto a = pop(stack).toDevice();
- auto b = pop(stack).toDevice();
- push(stack, a == b);
- return 0;
- };
+ [](Stack& stack) {
+ auto a = pop(stack).toDevice();
+ auto b = pop(stack).toDevice();
+ push(stack, a == b);
+ return 0;
}),
Operator(
"prim::device(Tensor a) -> Device",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- push(stack, pop(stack).toTensor().device());
- return 0;
- };
+ [](Stack& stack) {
+ push(stack, pop(stack).toTensor().device());
+ return 0;
}),
Operator(
"prim::dtype(Tensor a) -> int",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- at::Tensor a;
- pop(stack, a);
- push(stack, static_cast<int64_t>(a.scalar_type()));
- return 0;
- };
+ [](Stack& stack) {
+ at::Tensor a;
+ pop(stack, a);
+ push(stack, static_cast<int64_t>(a.scalar_type()));
+ return 0;
}),
Operator(
"prim::requires_grad(Tensor a) -> bool",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- at::Tensor a;
- pop(stack, a);
- push(stack, a.requires_grad());
- return 0;
- };
+ [](Stack& stack) {
+ at::Tensor a;
+ pop(stack, a);
+ push(stack, a.requires_grad());
+ return 0;
}),
Operator(
"prim::shape(Tensor a) -> int[]",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- at::Tensor a;
- pop(stack, a);
- push(stack, a.sizes());
- return 0;
- };
+ [](Stack& stack) {
+ at::Tensor a;
+ pop(stack, a);
+ push(stack, a.sizes());
+ return 0;
}),
Operator(
"prim::is_cuda(Tensor a) -> bool",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- at::Tensor a;
- pop(stack, a);
- push(stack, a.is_cuda());
- return 0;
- };
+ [](Stack& stack) {
+ at::Tensor a;
+ pop(stack, a);
+ push(stack, a.is_cuda());
+ return 0;
}),
Operator(
"aten::cpu(Tensor(a) self) -> Tensor(a|b)",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- at::Tensor a;
- pop(stack, a);
- push(stack, a.cpu());
- return 0;
- };
+ [](Stack& stack) {
+ at::Tensor a;
+ pop(stack, a);
+ push(stack, a.cpu());
+ return 0;
}),
Operator(
"aten::cuda(Tensor(a) self) -> Tensor(a|b)",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- at::Tensor a;
- pop(stack, a);
- push(stack, a.cuda());
- return 0;
- };
+ [](Stack& stack) {
+ at::Tensor a;
+ pop(stack, a);
+ push(stack, a.cuda());
+ return 0;
}),
Operator(
"prim::Undefined() -> Tensor",
Operator(
"prim::RaiseException(str msg) -> ()",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- throw JITException(pop(stack).toStringRef());
- return 0;
- };
+ [](Stack& stack) {
+ throw JITException(pop(stack).toStringRef());
+ return 0;
}),
Operator(
"prim::IgnoredPythonOp(...) -> ()",
- [](const Node* node) -> Operation {
- return [](Stack& stack) {
- throw JITException(
- "This Python function is annotated to be ignored"
- " and cannot be and has not been included in the exported"
- " binary, meaning that it cannot be executed now."
- " Make sure that ignored operations are never executed after"
- " import");
- return 0;
- };
+ [](Stack& stack) {
+ throw JITException(
+ "This Python function is annotated to be ignored"
+ " and cannot be and has not been included in the exported"
+ " binary, meaning that it cannot be executed now."
+ " Make sure that ignored operations are never executed after"
+ " import");
+ return 0;
}),
// Load x, y
}),
Operator(
"aten::_grad_sum_to_size(Tensor(a) self, int[] size) -> Tensor(a)",
- [](const Node* node) {
- return [=](Stack& stack) {
- at::Tensor self;
- Shared<IntList> desired_sizes;
- pop(stack, self, desired_sizes);
- push(stack, at::sum_to(std::move(self), desired_sizes->elements()));
- return 0;
- };
+ [](Stack& stack) {
+ at::Tensor self;
+ Shared<IntList> desired_sizes;
+ pop(stack, self, desired_sizes);
+ push(stack, at::sum_to(std::move(self), desired_sizes->elements()));
+ return 0;
}),
Operator(
prim::TupleUnpack,
}),
Operator(
"aten::_unwrap_optional(t(a)? optional) -> t(a)",
- [](const Node* node) -> Operation {
- return [=](Stack& stack) {
- auto val = pop(stack);
- AT_CHECK(!val.isNone(), "Unwrapping null optional");
- push(stack, val);
- return 0;
- };
+ [](Stack& stack) {
+ auto val = pop(stack);
+ AT_CHECK(!val.isNone(), "Unwrapping null optional");
+ push(stack, val);
+ return 0;
}),
// This op can be removed in preprocessing before being run in the
// interpreter (but is currently not removed), even when it is removed it
}),
Operator(
"aten::wait(Future(t) self) -> t",
- [](const Node* node) {
- return [=](Stack& stack) {
- auto future = pop(stack).toFuture();
- if (future->completed()) {
- push(stack, future->value());
- } else {
- throw Suspend(future);
- }
- return 0;
- };
+ [](Stack& stack) {
+ auto future = pop(stack).toFuture();
+ if (future->completed()) {
+ push(stack, future->value());
+ } else {
+ throw Suspend(future);
+ }
+ return 0;
}),
});
#define DEFINE_GENERIC_OP(aten_op, int_op, float_op, int_result, float_result) \
Operator( \
#aten_op "(int a, int b) -> " #int_result, \
- [](const Node* node) { \
- return [=](Stack& stack) { \
- int64_t a, b; \
- pop(stack, a, b); \
- push(stack, int_op); \
- return 0; \
- }; \
+ [](Stack& stack) { \
+ int64_t a, b; \
+ pop(stack, a, b); \
+ push(stack, int_op); \
+ return 0; \
}), \
Operator( \
- #aten_op "(float a, float b) -> " #float_result, \
- [](const Node* node) { \
- return [=](Stack& stack) { \
- double a, b; \
- pop(stack, a, b); \
- push(stack, float_op); \
- return 0; \
- }; \
+ #aten_op "(float a, float b) -> " #float_result, [](Stack& stack) { \
+ double a, b; \
+ pop(stack, a, b); \
+ push(stack, float_op); \
+ return 0; \
})
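To see what the updated macro produces, a hypothetical invocation such as DEFINE_GENERIC_OP(aten::add, a + b, a + b, int, float) (illustrative arguments, not taken from this diff) expands, after stringizing and concatenation, to roughly this pair of registrations:

Operator(
    "aten::add(int a, int b) -> int",
    [](Stack& stack) {
      int64_t a, b;
      pop(stack, a, b);
      push(stack, a + b);
      return 0;
    }),
Operator(
    "aten::add(float a, float b) -> float", [](Stack& stack) {
      double a, b;
      pop(stack, a, b);
      push(stack, a + b);
      return 0;
    })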
-#define DEFINE_INT_FLOAT_OP(aten_op, op, result) \
- Operator( \
- #aten_op "(int a, float b) -> " #result, \
- [](const Node* node) { \
- return [=](Stack& stack) { \
- int64_t a; \
- double b; \
- pop(stack, a, b); \
- push(stack, op); \
- return 0; \
- }; \
- }), \
- Operator(#aten_op "(float a, int b) -> " #result, [](const Node* node) { \
- return [=](Stack& stack) { \
- double a; \
- int64_t b; \
- pop(stack, a, b); \
- push(stack, op); \
- return 0; \
- }; \
+#define DEFINE_INT_FLOAT_OP(aten_op, op, result) \
+ Operator( \
+ #aten_op "(int a, float b) -> " #result, \
+ [](Stack& stack) { \
+ int64_t a; \
+ double b; \
+ pop(stack, a, b); \
+ push(stack, op); \
+ return 0; \
+ }), \
+ Operator(#aten_op "(float a, int b) -> " #result, [](Stack& stack) { \
+ double a; \
+ int64_t b; \
+ pop(stack, a, b); \
+ push(stack, op); \
+ return 0; \
})
-#define DEFINE_INT_OP(aten_op, op) \
- Operator(#aten_op "(int a, int b) -> int", [](const Node* node) { \
- return [=](Stack& stack) { \
- int64_t a, b; \
- pop(stack, a, b); \
- push(stack, op); /* NOLINT(hicpp-signed-bitwise) */ \
- return 0; \
- }; \
+#define DEFINE_INT_OP(aten_op, op) \
+ Operator(#aten_op "(int a, int b) -> int", [](Stack& stack) { \
+ int64_t a, b; \
+ pop(stack, a, b); \
+ push(stack, op); /* NOLINT(hicpp-signed-bitwise) */ \
+ return 0; \
})
#define DEFINE_BINARY_OP(aten_op, op) \
#define DEFINE_COMPARISON_OP(aten_op, op) \
DEFINE_GENERIC_OP(aten_op, op, op, bool, bool), \
DEFINE_INT_FLOAT_OP(aten_op, op, bool)
-#define DEFINE_BOOL_OP(aten_op, op) \
- Operator(#aten_op "(bool a, bool b) -> bool", [](const Node* node) { \
- return [=](Stack& stack) { \
- bool a, b; \
- pop(stack, a, b); \
- push(stack, op); \
- return 0; \
- }; \
+#define DEFINE_BOOL_OP(aten_op, op) \
+ Operator(#aten_op "(bool a, bool b) -> bool", [](Stack& stack) { \
+ bool a, b; \
+ pop(stack, a, b); \
+ push(stack, op); \
+ return 0; \
})
// Convert a Python index (which may be negative) into an index usable for a
}
template <typename TList, typename TElement>
-Operation listAppend(const Node* node) {
- return [](Stack& stack) {
- TList a;
- TElement el;
- pop(stack, a, el);
+int listAppend(Stack& stack) {
+ TList a;
+ TElement el;
+ pop(stack, a, el);
- a->elements().push_back(el);
- push(stack, a);
+ a->elements().push_back(el);
+ push(stack, a);
- return 0;
- };
+ return 0;
}
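Since listAppend is now a plain function template over the stack rather than an Operation factory, a registration can name an instantiation directly and let the function pointer convert to the Operation std::function. A hypothetical registration (schema string and instantiation shown for illustration only):

Operator(
    "aten::append(int[](a!) self, int el) -> int[](a!)",
    listAppend<Shared<IntList>, int64_t>),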
template <typename TList>
}
template <typename T>
-Operation listSelect(const Node* node) {
- return [=](Stack& stack) {
- T list;
- int64_t idx;
- pop(stack, list, idx);
+int listSelect(Stack& stack) {
+ T list;
+ int64_t idx;
+ pop(stack, list, idx);
- auto element = getItem(list, idx);
- push(stack, std::move(element));
- return 0;
- };
+ auto element = getItem(list, idx);
+ push(stack, std::move(element));
+ return 0;
}
// needs specialization because cannot return a pointer to a bool in an array
template <>
-Operation listSelect<Shared<BoolList>>(const Node* node) {
- return [=](Stack& stack) {
- Shared<BoolList> list;
- int64_t idx;
- pop(stack, list, idx);
+int listSelect<Shared<BoolList>>(Stack& stack) {
+ Shared<BoolList> list;
+ int64_t idx;
+ pop(stack, list, idx);
- auto element = getBoolItem(list->elements(), idx);
- push(stack, std::move(element));
- return 0;
- };
+ auto element = getBoolItem(list->elements(), idx);
+ push(stack, std::move(element));
+ return 0;
}
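The specialization above exists because std::vector<bool> is a packed bitset: its operator[] yields a proxy object rather than a bool&, so the generic getItem (assumed to hand back a reference into the list) cannot work. A self-contained sketch of the by-value accessor that getBoolItem is presumed to implement (bounds handling simplified):

#include <cstdint>
#include <stdexcept>
#include <vector>

bool getBoolItemSketch(const std::vector<bool>& elems, int64_t idx) {
  if (idx < 0 || idx >= static_cast<int64_t>(elems.size()))
    throw std::out_of_range("list index out of range");
  return elems[idx];  // copies the bit out; no reference or pointer escapes
}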
template <typename T>
-Operation listLen(const Node* node) {
- return [=](Stack& stack) {
- T a;
- pop(stack, a);
- const int64_t size = a->elements().size();
- push(stack, size);
- return 0;
- };
+int listLen(Stack& stack) {
+ T a;
+ pop(stack, a);
+
+ const int64_t size = a->elements().size();
+ push(stack, size);
+ return 0;
}
template <typename T>
-Operation listEq(const Node* node) {
- return [=](Stack& stack) {
- T a;
- T b;
- pop(stack, a, b);
- push(stack, a->elements() == b->elements() ? true : false);
- return 0;
- };
+int listEq(Stack& stack) {
+ T a;
+ T b;
+ pop(stack, a, b);
+ push(stack, a->elements() == b->elements() ? true : false);
+ return 0;
}
template <typename T>
-Operation listNe(const Node* node) {
- return [=](Stack& stack) {
- T a;
- T b;
- pop(stack, a, b);
- push(stack, !(a->elements() == b->elements()));
- return 0;
- };
+int listNe(Stack& stack) {
+ T a;
+ T b;
+ pop(stack, a, b);
+ push(stack, !(a->elements() == b->elements()));
+ return 0;
}
inline bool tensor_list_equal(Shared<TensorList> a, Shared<TensorList> b) {
// Specialization for at::Tensor, since it doesn't define operator==
template <>
-Operation listEq<Shared<TensorList>>(const Node* node) {
- return [=](Stack& stack) {
- Shared<TensorList> a;
- Shared<TensorList> b;
- pop(stack, a, b);
- push(stack, tensor_list_equal(a, b));
- return 0;
- };
+int listEq<Shared<TensorList>>(Stack& stack) {
+ Shared<TensorList> a;
+ Shared<TensorList> b;
+ pop(stack, a, b);
+ push(stack, tensor_list_equal(a, b));
+ return 0;
}
// Specialization for at::Tensor, since it doesn't define operator==
template <>
-Operation listNe<Shared<TensorList>>(const Node* node) {
- return [=](Stack& stack) {
- Shared<TensorList> a;
- Shared<TensorList> b;
- pop(stack, a, b);
- push(stack, !tensor_list_equal(a, b));
- return 0;
- };
+int listNe<Shared<TensorList>>(Stack& stack) {
+ Shared<TensorList> a;
+ Shared<TensorList> b;
+ pop(stack, a, b);
+ push(stack, !tensor_list_equal(a, b));
+ return 0;
}
Operation listList(const Node* node) {
}
template <class TList, class TElement>
-Operation listAdd(const Node* node) {
- return [=](Stack& stack) {
- TList a;
- TList b;
- pop(stack, a, b);
-
- std::vector<TElement> ret;
- const auto total_size = a->elements().size() + b->elements().size();
- ret.reserve(total_size);
- for (const auto& a_element : a->elements()) {
- ret.push_back(a_element);
- }
- for (const auto& b_element : b->elements()) {
- ret.push_back(b_element);
- }
+int listAdd(Stack& stack) {
+ TList a;
+ TList b;
+ pop(stack, a, b);
+
+ std::vector<TElement> ret;
+ const auto total_size = a->elements().size() + b->elements().size();
+ ret.reserve(total_size);
+ for (const auto& a_element : a->elements()) {
+ ret.push_back(a_element);
+ }
+ for (const auto& b_element : b->elements()) {
+ ret.push_back(b_element);
+ }
- push(stack, ret);
- return 0;
- };
+ push(stack, ret);
+ return 0;
}
template <typename TList, typename TElement>
-Operation listSlice(const Node* node) {
- return [](Stack& stack) {
- TList list;
- int64_t start;
- int64_t end;
- int64_t step;
-
- pop(stack, list, start, end, step);
- const int64_t list_size = list->elements().size();
-
- // clamp start and end to the bounds of the list
- const auto normalized_start =
- std::max((int64_t)0, normalizeIndex(start, list_size));
- const auto normalized_end =
- std::min(list_size, normalizeIndex(end, list_size));
-
- std::vector<TElement> sliced_list;
- if (normalized_end <= normalized_start) {
- // early exit if the slice is trivially empty
- push(stack, sliced_list);
- return 0;
- }
+int listSlice(Stack& stack) {
+ TList list;
+ int64_t start;
+ int64_t end;
+ int64_t step;
- sliced_list.reserve(normalized_end - normalized_start);
+ pop(stack, list, start, end, step);
+ const int64_t list_size = list->elements().size();
- for (auto i = normalized_start; i < normalized_end;) {
- sliced_list.push_back(list->elements()[i]);
- i += step;
- }
+ // clamp start and end to the bounds of the list
+ const auto normalized_start =
+ std::max((int64_t)0, normalizeIndex(start, list_size));
+ const auto normalized_end =
+ std::min(list_size, normalizeIndex(end, list_size));
+ std::vector<TElement> sliced_list;
+ if (normalized_end <= normalized_start) {
+ // early exit if the slice is trivially empty
push(stack, sliced_list);
return 0;
- };
+ }
+
+ sliced_list.reserve(normalized_end - normalized_start);
+
+ for (auto i = normalized_start; i < normalized_end;) {
+ sliced_list.push_back(list->elements()[i]);
+ i += step;
+ }
+
+ push(stack, sliced_list);
+ return 0;
}
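The clamping in listSlice mirrors Python slice semantics for a positive step. A standalone re-creation of the arithmetic, checked against the Python result [0,1,2,3,4][-3:100:2] == [2, 4] (normalizeIndexSketch stands in for the real normalizeIndex, which is assumed to wrap negative indices once):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

int64_t normalizeIndexSketch(int64_t idx, int64_t size) {
  return idx < 0 ? idx + size : idx;  // single wrap for negative indices
}

int main() {
  std::vector<int64_t> list{0, 1, 2, 3, 4};
  const int64_t start = -3, end = 100, step = 2;
  const auto size = static_cast<int64_t>(list.size());
  const auto s = std::max<int64_t>(0, normalizeIndexSketch(start, size));
  const auto e = std::min(size, normalizeIndexSketch(end, size));
  std::vector<int64_t> out;
  for (auto i = s; i < e; i += step)
    out.push_back(list[i]);
  assert((out == std::vector<int64_t>{2, 4}));  // start wrapped, end clamped
  return 0;
}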
template <typename TList, typename TElement>
-Operation listSetItem(const Node* node) {
- return [](Stack& stack) {
- TList list;
- int64_t idx;
- TElement value;
+int listSetItem(Stack& stack) {
+ TList list;
+ int64_t idx;
+ TElement value;
- pop(stack, list, idx, value);
- getItem(list, idx) = value;
+ pop(stack, list, idx, value);
+ getItem(list, idx) = value;
- push(stack, list);
- return 0;
- };
+ push(stack, list);
+ return 0;
}
template <>
-Operation listSetItem<Shared<BoolList>, bool>(const Node* node) {
- return [](Stack& stack) {
- Shared<BoolList> list;
- int64_t idx;
- bool value;
+int listSetItem<Shared<BoolList>, bool>(Stack& stack) {
+ Shared<BoolList> list;
+ int64_t idx;
+ bool value;
- pop(stack, list, idx, value);
+ pop(stack, list, idx, value);
- int64_t list_size = list->elements().size();
- auto normalized_idx = normalizeIndex(idx, list_size);
- if (normalized_idx < 0 || normalized_idx >= list_size) {
- throw std::out_of_range("list index out of range");
- }
- list->elements()[normalized_idx] = value;
+ int64_t list_size = list->elements().size();
+ auto normalized_idx = normalizeIndex(idx, list_size);
+ if (normalized_idx < 0 || normalized_idx >= list_size) {
+ throw std::out_of_range("list index out of range");
+ }
+ list->elements()[normalized_idx] = value;
- push(stack, list);
- return 0;
- };
+ push(stack, list);
+ return 0;
}
int dictLen(Stack& stack) {
RegisterOperators reg2({
-#define DEFINE_STRING_OP(op_name, string_op, result) \
- Operator(#op_name "(str a, str b) ->" #result, [](const Node* node) { \
- return [=](Stack& stack) { \
- auto b = pop(stack).toStringRef(); \
- auto a = pop(stack).toStringRef(); \
- push(stack, string_op); \
- return 0; \
- }; \
+#define DEFINE_STRING_OP(op_name, string_op, result) \
+ Operator(#op_name "(str a, str b) ->" #result, [](Stack& stack) { \
+ auto b = pop(stack).toStringRef(); \
+ auto a = pop(stack).toStringRef(); \
+ push(stack, string_op); \
+ return 0; \
})
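Note the operand order inside DEFINE_STRING_OP: arguments are pushed left-to-right, so the right-hand operand b sits on top of the stack and is popped first. A toy check of that ordering (stand-in stack type, not the real JIT one):

#include <cassert>
#include <string>
#include <vector>

using Stack = std::vector<std::string>;  // stand-in for the interpreter stack

std::string popStr(Stack& s) {
  std::string v = s.back();  // top of stack is the last argument pushed
  s.pop_back();
  return v;
}

int main() {
  Stack stack{"lhs", "rhs"};  // pushed in schema order: a, then b
  auto b = popStr(stack);     // "rhs"
  auto a = popStr(stack);     // "lhs"
  assert(a == "lhs" && b == "rhs");
  return 0;
}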
DEFINE_STRING_OP(aten::eq, a == b, bool),
#define CREATE_COPY_OP(other_type, c_type) \
Operator( \
"aten::copy_(Tensor(a!) self, " #other_type " other) -> Tensor(a!)", \
- [](const Node* node) { \
- return [=](Stack& stack) { \
- at::Tensor t; \
- c_type other; \
- pop(stack, t, other); \
- std::move(t) = other; /* NOLINT(bugprone-use-after-move) */ \
- push(stack, std::move(t)); /* NOLINT(bugprone-use-after-move) */ \
- return 0; \
- }; \
+ [](Stack& stack) { \
+ at::Tensor t; \
+ c_type other; \
+ pop(stack, t, other); \
+ std::move(t) = other; /* NOLINT(bugprone-use-after-move) */ \
+ push(stack, std::move(t)); /* NOLINT(bugprone-use-after-move) */ \
+ return 0; \
})
CREATE_COPY_OP(Tensor, at::Tensor),
// NB: This is the python truediv operation
Operator(
"aten::div(int a, int b) -> float",
- [](const Node* node) {
- return [=](Stack& stack) {
- int64_t a, b;
- pop(stack, a, b);
- push(stack, static_cast<double>(a) / static_cast<double>(b));
- return 0;
- };
+ [](Stack& stack) {
+ int64_t a, b;
+ pop(stack, a, b);
+ push(stack, static_cast<double>(a) / static_cast<double>(b));
+ return 0;
}),
Operator(
"aten::div(float a, float b) -> float",
- [](const Node* node) {
- return [=](Stack& stack) {
- double a, b;
- pop(stack, a, b);
- push(stack, a / b);
- return 0;
- };
+ [](Stack& stack) {
+ double a, b;
+ pop(stack, a, b);
+ push(stack, a / b);
+ return 0;
}),
Operator(
"aten::floor(float a) -> int",
- [](const Node* node) {
- return [=](Stack& stack) {
- double a;
- pop(stack, a);
- push(stack, static_cast<int64_t>(std::floor(a)));
- return 0;
- };
+ [](Stack& stack) {
+ double a;
+ pop(stack, a);
+ push(stack, static_cast<int64_t>(std::floor(a)));
+ return 0;
}),
DEFINE_COMPARISON_OP(aten::ne, a != b),
Operator(
"aten::neg(int self) -> int",
- [](const Node* node) {
- return [=](Stack& stack) {
- push(stack, -pop(stack).toInt());
- return 0;
- };
+ [](Stack& stack) {
+ push(stack, -pop(stack).toInt());
+ return 0;
}),
Operator(
"aten::neg(float self) -> float",
- [](const Node* node) {
- return [=](Stack& stack) {
- push(stack, -pop(stack).toDouble());
- return 0;
- };
+ [](Stack& stack) {
+ push(stack, -pop(stack).toDouble());
+ return 0;
}),
Operator(
"aten::__not__(bool self) -> bool",
- [](const Node* node) {
- return [=](Stack& stack) {
- push(stack, !pop(stack).toBool());
- return 0;
- };
+ [](Stack& stack) {
+ push(stack, !pop(stack).toBool());
+ return 0;
}),
Operator(
"aten::__is__(t1 self, t2 obj) -> bool",
- [](const Node* node) {
- return [=](Stack& stack) {
- IValue self, obj;
- pop(stack, self, obj);
- push(stack, self.isSameIdentity(obj));
- return 0;
- };
+ [](Stack& stack) {
+ IValue self, obj;
+ pop(stack, self, obj);
+ push(stack, self.isSameIdentity(obj));
+ return 0;
}),
Operator(
"aten::__isnot__(t1 self, t2 obj) -> bool",
- [](const Node* node) {
- return [=](Stack& stack) {
- IValue self, obj;
- pop(stack, self, obj);
- push(stack, !self.isSameIdentity(obj));
- return 0;
- };
+ [](Stack& stack) {
+ IValue self, obj;
+ pop(stack, self, obj);
+ push(stack, !self.isSameIdentity(obj));
+ return 0;
}),
Operator(
"aten::_tensor_to_list(Tensor self) -> int[]",
- [](const Node* node) {
- return [=](Stack& stack) {
- at::Tensor t;
- pop(stack, t);
- std::vector<int64_t> elems;
- elems.reserve(t.size(0));
- for (int i = 0; i < t.size(0); i++) {
- elems.push_back(*t[i].data<int32_t>());
- }
- push(stack, jit::IntList::create(elems));
- return 0;
- };
+ [](Stack& stack) {
+ at::Tensor t;
+ pop(stack, t);
+ std::vector<int64_t> elems;
+ elems.reserve(t.size(0));
+ for (int i = 0; i < t.size(0); i++) {
+ elems.push_back(*t[i].data<int32_t>());
+ }
+ push(stack, jit::IntList::create(elems));
+ return 0;
}),
Operator(
"aten::_list_to_tensor(int[] self) -> Tensor",
- [](const Node* node) {
- return [=](Stack& stack) {
- std::vector<int64_t> l;
- pop(stack, l);
- auto t = torch::empty(
- {static_cast<int64_t>(l.size())}, at::dtype(at::kInt));
- for (size_t i = 0; i < l.size(); i++) {
- t[i] = l[i];
- }
- push(stack, t);
- return 0;
- };
+ [](Stack& stack) {
+ std::vector<int64_t> l;
+ pop(stack, l);
+ auto t = torch::empty(
+ {static_cast<int64_t>(l.size())}, at::dtype(at::kInt));
+ for (size_t i = 0; i < l.size(); i++) {
+ t[i] = l[i];
+ }
+ push(stack, t);
+ return 0;
}),
#define CREATE_DICT_OPS(key_type) \
Operator("aten::len(Dict(" key_type ", t) self) -> int", dictLen), \