c10::Dispatcher::singleton().callBoxed(*this, stack);
}
+ // Convenience overload taking the stack by reference; forwards to the
+ // pointer-based callBoxed(Stack*) overload declared on this handle
+ // (visible just above this hunk), which dispatches through the global
+ // c10::Dispatcher singleton.
+ void callBoxed(Stack& stack) const {
+ callBoxed(&stack);
+ }
+
// Boxed re-dispatch with an explicit DispatchKeySet: delegates to the global
// c10::Dispatcher singleton, passing this operator handle along with the
// caller-supplied key set and stack. Still takes Stack* (not migrated here).
void redispatchBoxed(DispatchKeySet ks, Stack* stack) const {
c10::Dispatcher::singleton().redispatchBoxed(*this, ks, stack);
}
#pragma once
+#include <type_traits>
+
#include <ATen/core/ivalue.h>
+#include <c10/util/Deprecated.h>
// TODO move this to c10 namespace
using c10::IValue;
using Stack = std::vector<IValue>;
-using Operation = std::function<void(Stack*)>;
+
+// Type-erased wrapper for a JIT operator implementation. Replaces the former
+// `using Operation = std::function<void(Stack*)>` alias (removed just above):
+// new-style implementations take `Stack&`, while callables written against the
+// old `Stack*` signature are still accepted through a deprecated adapter
+// constructor so existing registrations keep compiling (with a warning).
+class Operation {
+ // accepts<F, Arg> is true when F can be stored in std::function<void(Arg)>,
+ // i.e. F is callable as void(Arg). Used to select between the two
+ // constructors below via SFINAE.
+ template <typename F, typename Arg>
+ using accepts = std::is_constructible<std::function<void(Arg)>, F&&>;
+
+ public:
+ // Deprecated path: F is callable as void(Stack*). Wraps it in an adapter
+ // lambda that forwards the address of the referenced stack. Note the
+ // callable is captured by value (moved/forwarded into the closure).
+ template <typename F,
+ std::enable_if_t<accepts<F, Stack*>::value, int> = 0>
+ C10_DEPRECATED_MESSAGE("Please use void(Stack&) to register operator instead.")
+ Operation(F&& raw): op_([raw = std::forward<F>(raw)](Stack& stack) {
+ raw(&stack);
+ }) {}
+
+ // Preferred path: F is callable as void(Stack&). The is_same guard keeps
+ // this constructor from hijacking copy/move construction of Operation
+ // itself (Operation is invocable with Stack&, so it would match accepts).
+ template <typename F,
+ std::enable_if_t<accepts<F, Stack&>::value &&
+ !std::is_same<std::decay_t<F>, Operation>::value, int> = 0>
+ Operation(F&& op): op_(std::forward<F>(op)) {}
+
+ // Allow `Operation op = nullptr;` — yields an empty (falsy) Operation,
+ // mirroring std::function's nullptr constructor.
+ Operation(std::nullptr_t) noexcept {}
+
+ // True when a callable is held (same contract as std::function::operator bool).
+ explicit operator bool() const noexcept {
+ return op_ ? true : false;
+ }
+
+ // Invoke the wrapped implementation on the given stack.
+ void operator()(Stack& stack) {
+ op_(stack);
+ }
+
+ // Expose std::function::target so callers can recover the stored callable
+ // when its concrete type T is known; returns nullptr on type mismatch.
+ template <typename T>
+ T* target() noexcept {
+ return op_.target<T>();
+ }
+
+ private:
+ std::function<void(Stack&)> op_;
+};
// An operation with N inputs and M outputs pops the last N inputs off
// the stack and pushes its M outputs onto the stack
#include <gtest/gtest.h>
#include <torch/csrc/autograd/generated/variable_factories.h>
+#include <torch/csrc/jit/frontend/ir_emitter.h>
+#include <torch/csrc/jit/ir/alias_analysis.h>
#include <torch/csrc/jit/ir/irparser.h>
-#include "torch/csrc/jit/frontend/ir_emitter.h"
-#include "torch/csrc/jit/ir/alias_analysis.h"
-#include "torch/csrc/jit/runtime/custom_operator.h"
-#include "torch/csrc/utils/memory.h"
+#include <torch/csrc/jit/runtime/custom_operator.h>
+#include <torch/csrc/utils/memory.h>
namespace torch {
namespace jit {
TEST(WriteTrackingTest, Basic) {
RegisterOperators reg({Operator(
"prim::creates_alias(Tensor(a) x) -> Tensor(a)",
- [](Stack* s) {},
+ [](Stack&) {},
aliasAnalysisFromSchema())});
const auto creates_alias = Symbol::fromQualString("prim::creates_alias");
auto graph = std::make_shared<Graph>();
RegisterOperators reg(
{Operator(
"prim::returns_wildcard(Tensor a) -> Tensor(*)",
- [](Stack* stack) {},
+ [](Stack&) {},
aliasAnalysisFromSchema()),
Operator(
"prim::writes(Tensor(z!) a) -> Tensor(a)",
- [](Stack* stack) {},
+ [](Stack&) {},
aliasAnalysisFromSchema())});
const auto returns_wildcard =
Symbol::fromQualString("prim::returns_wildcard");
Stack stack;
push(stack, 2.0f, at::ones(5));
- op->getOperation()(&stack);
+ op->getOperation()(stack);
at::Tensor output;
pop(stack, output);
Stack stack;
push(stack, 2.0f, at::ones(5));
- op->getOperation()(&stack);
+ op->getOperation()(stack);
at::Tensor output;
pop(stack, output);
c10::List<c10::complex<double>>(
{c10::complex<double>(2.4, -5.5), c10::complex<double>(-1.3, 2)}));
push(stack, c10::List<at::Tensor>({at::ones(5)}));
- op->getOperation()(&stack);
+ op->getOperation()(stack);
c10::List<double> output;
pop(stack, output);
Stack stack;
push(stack, c10::List<at::Tensor>({at::ones(5)}));
- op->getOperation()(&stack);
+ op->getOperation()(stack);
c10::List<at::Tensor> output;
pop(stack, output);
torch::jit::RegisterOperators reg({OperatorGenerator(
TORCH_SELECTIVE_NAME_IN_SCHEMA(
op_list, "foofoo::not_exist(float a, Tensor b) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double a;
at::Tensor b;
torch::jit::RegisterOperators reg({OperatorGenerator(
TORCH_SELECTIVE_NAME_IN_SCHEMA(
op_list, "foofoo::bar.template(float a, Tensor b) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double a;
at::Tensor b;
Stack stack;
push(stack, 2.0f, at::ones(5));
- op->getOperation()(&stack);
+ op->getOperation()(stack);
at::Tensor output;
pop(stack, output);
RegisterOperators reg({
Operator(
"prim::test_none() -> int?",
- [](Stack* stack) { push(stack, IValue()); },
+ [](Stack& stack) { push(stack, IValue()); },
aliasAnalysisFromSchema()),
Operator(
"prim::is_none(int? a) -> bool",
- [](Stack* stack) {
+ [](Stack& stack) {
IValue a = pop(stack);
if (a.isNone()) {
push(stack, true);
RegisterOperators reg({
Operator(
"aten::test_vartype(t[] a, t b) -> (t)",
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<double> list;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double a;
RegisterOperators reg({
Operator(
"aten::test_vartype2(t a, t[] b) -> (t[])",
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double a;
c10::List<double> list;
// because it always produces empty Tensors.
Operator(
"prim::MakeTestTensor() -> Tensor",
- [](Stack* stack) { push(stack, at::Tensor()); },
+ [](Stack& stack) { push(stack, at::Tensor()); },
aliasAnalysisFromSchema()),
});
} // namespace
torch::jit::Stack stack;
torch::jit::push(stack, std::forward<Args>(args)...);
- op->getOperation()(&stack);
+ op->getOperation()(stack);
TORCH_INTERNAL_ASSERT(1 == stack.size());
return torch::jit::pop(stack).to<Result>();
jit::RegisterOperators reg_fut_ops({
jit::Operator(
"profiler::_call_end_callbacks_on_jit_fut(Tensor x, Future(t) y) -> Future(t)",
- [](jit::Stack* stack) {
+ [](jit::Stack& stack) {
// Pop inputs, which should be a future and a tensor
auto fut = jit::pop(stack).toFuture();
auto tensor = jit::pop(stack).toTensor();
std::vector<c10::Stream> streams) const {
c10::MultiStreamGuard guard(streams);
try {
- op.getOperation()(&stack);
+ op.getOperation()(stack);
} catch (const std::exception&) {
return asFuture(std::current_exception());
}
Operator(
prim::CudaFusionGroup,
[](const Node* node) -> Operation {
- return [node](Stack* stack) {
- fuser::cuda::runFusionGroup(node, *stack);
+ return [node](Stack& stack) {
+ fuser::cuda::runFusionGroup(node, stack);
};
},
aliasAnalysisSpecialCase()),
// if we would ever return refined tensor, which would change aliasing
// analysis, we should update aliasdb pass.
[](const Node* node) -> Operation {
- return [node](Stack* stack) {
+ return [node](Stack& stack) {
// TODO: check latency here!!!!
std::vector<TypePtr> types = node->tys(attr::types);
const auto num_inputs = types.size();
[](const Node* node) -> Operation {
int64_t dim = node->i(attr::dim);
int64_t num_inputs = node->inputs().size();
- return [dim, num_inputs](Stack* stack) {
+ return [dim, num_inputs](Stack& stack) {
auto result = at::cat(
fmap(
last(stack, num_inputs),
auto jit_op = findOperatorFor(opname);
std::vector<c10::Argument> args;
if (jit_op) {
- fn = [jit_op](Stack& stack) { jit_op->getOperation()(&stack); };
+ fn = [jit_op](Stack& stack) { jit_op->getOperation()(stack); };
args = jit_op->schema().arguments();
} else {
auto op = c10::Dispatcher::singleton().findSchema(opname_c10);
RegisterOperators mm_tree_reduction_reg({Operator(
"prim::MMTreeReduce(...) -> Tensor",
- [](Stack* stack) {
+ [](Stack& stack) {
auto num_inputs = pop(stack).toInt();
std::vector<at::Tensor> inputs;
inputs.reserve(num_inputs);
- for (auto it = stack->end() - num_inputs; it != stack->end(); ++it) {
+ for (auto it = stack.end() - num_inputs; it != stack.end(); ++it) {
inputs.push_back(std::move(*it).toTensor());
}
drop(stack, num_inputs);
[](const Node* node) -> Operation {
size_t num_other_side_inputs = node->inputs().size() - 1;
Side single_side = static_cast<Side>(node->i(Symbol::attr("side")));
- return [num_other_side_inputs, single_side](Stack* stack) {
+ return [num_other_side_inputs, single_side](Stack& stack) {
at::Tensor side_input;
std::vector<at::Tensor> other_side_inputs;
other_side_inputs.reserve(num_other_side_inputs);
- for (auto it = stack->end() - num_other_side_inputs; it != stack->end();
+ for (auto it = stack.end() - num_other_side_inputs; it != stack.end();
++it) {
other_side_inputs.push_back(std::move(*it).toTensor());
}
mm_out,
num_other_side_inputs,
/*dim=*/single_side == Side::LHS ? 1 : 0);
- stack->insert(
- stack->end(),
+ stack.insert(
+ stack.end(),
std::make_move_iterator(outputs.begin()),
std::make_move_iterator(outputs.end()));
} else {
if (single_side == Side::LHS) {
for (at::Tensor& other : other_side_inputs) {
- stack->emplace_back(side_input.mm(other));
+ stack.emplace_back(side_input.mm(other));
}
} else {
for (at::Tensor& other : other_side_inputs) {
- stack->emplace_back(other.mm(side_input));
+ stack.emplace_back(other.mm(side_input));
}
}
}
try {
auto op = n->getOperation();
- op(&stack);
+ op(stack);
} catch (...) {
return c10::nullopt;
}
RegisterOperators reg_ops(
{Operator(
"aten::_ncf_unsqueeze(Tensor(a) self, int ndim) -> Tensor(a)",
- [](Stack* stack) {
+ [](Stack& stack) {
const int64_t ndim = pop(stack).toInt();
auto self = pop(stack).toTensor();
c10::SmallVector<int64_t, 8> sizes(ndim, 1);
aliasAnalysisFromSchema()),
Operator(
"aten::_ncf_view(Tensor(a) self, int[] input_shape, int normalized_ndim) -> Tensor(a)",
- [](Stack* stack) {
+ [](Stack& stack) {
const int64_t normalized_ndim = pop(stack).toInt();
auto input_shape = pop(stack).toIntList();
auto self = pop(stack).toTensor();
Operation createUnaryOp(
std::function<void(at::Tensor output, at::Tensor input)> aten_op,
bool inplace = false) {
- return [aten_op, inplace](Stack* stack) {
+ return [aten_op, inplace](Stack& stack) {
auto a = pop(stack).toTensor();
c10::impl::ExcludeDispatchKeyGuard edkg(c10::autograd_dispatch_keyset);
// we cast `a` to an `ideep::tensor`, so we can get at its descriptor
};
}
-void MKLDNNLayerNormOp(Stack* stack, bool inplace) {
+void MKLDNNLayerNormOp(Stack& stack, bool inplace) {
c10::impl::ExcludeDispatchKeyGuard edkg(c10::autograd_dispatch_keyset);
// enable_cudnn not used
};
Operation BroadOp(const Node* node) {
- return [](Stack* stack) {
+ return [](Stack& stack) {
auto b = pop(stack).toTensor();
auto a = pop(stack).toTensor();
auto b_size = b.sizes();
const RegisterOperators MKLDNNLayerNormOpReg({
torch::jit::Operator(
"prim::MKLDNNLayerNorm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor",
- [](Stack* stack) { MKLDNNLayerNormOp(stack, false); },
+ [](Stack& stack) { MKLDNNLayerNormOp(stack, false); },
AliasAnalysisKind::FROM_SCHEMA),
torch::jit::Operator(
"prim::MKLDNNLayerNorm_(Tensor(a!) input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor(a!)",
- [](Stack* stack) { MKLDNNLayerNormOp(stack, true); },
+ [](Stack& stack) { MKLDNNLayerNormOp(stack, true); },
AliasAnalysisKind::FROM_SCHEMA),
});
Operation ConstantMKLDNNTensorOp(const Node* node) {
const auto& t = node->t(attr::value);
- return [t](Stack* stack) {
+ return [t](Stack& stack) {
push(stack, t);
return 0;
};
// XXX: this follows the schema convention of conv2d/conv3d, not
// aten::mkldnn_convolution, which is different for some reason!
"prim::mkldnn_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor",
- [](jit::Stack* stack) {
+ [](jit::Stack& stack) {
int64_t groups = pop(stack).toInt();
auto dilation = pop(stack).toIntVector();
auto padding = pop(stack).toIntVector();
// in default bindings
jit::Operator(
"prim::MKLDNNScalarMul(Tensor self, Scalar other) -> Tensor",
- [](jit::Stack* stack) {
+ [](jit::Stack& stack) {
c10::impl::ExcludeDispatchKeyGuard edkg(
c10::autograd_dispatch_keyset);
float other = pop(stack).toScalar().toFloat();
aliasAnalysisFromSchema()),
jit::Operator(
"prim::MKLDNNScalarMul_(Tensor(a!) self, Scalar other) -> Tensor(a!)",
- [](jit::Stack* stack) {
+ [](jit::Stack& stack) {
c10::impl::ExcludeDispatchKeyGuard edkg(
c10::autograd_dispatch_keyset);
float other = pop(stack).toScalar().toFloat();
// is to uncover any mistakes we could make when editing this code,
// and eventually it shouldn't matter, because this phase should be
// preceded by schema checking.
- op(&stack);
+ op(stack);
AT_ASSERT(stack.size() == node->outputs().size());
for (const auto i : c10::irange(stack.size())) {
Operation createTensorExprOp(const Node* node) {
auto kernel =
std::make_shared<tensorexpr::TensorExprKernel>(node->g(attr::Subgraph));
- return [kernel](Stack* stack) {
+ return [kernel](Stack& stack) {
RECORD_FUNCTION("TensorExpr", std::vector<c10::IValue>());
- kernel->run(*stack);
+ kernel->run(stack);
return 0;
};
}
const auto inputsDeepCopy = deepCopy(stack);
// Run the op
- node->getOperation()(&stack);
+ node->getOperation()(stack);
const auto outputs = std::move(stack);
Stack stack = std::get<1>(opWithStack);
{
pybind11::gil_scoped_release no_gil_guard;
- found_op->getOperation()(&stack);
+ found_op->getOperation()(stack);
}
return createPyObjectForStack(std::move(stack));
AT_ASSERT(op->outputs().size() == 1);
- return [=](Stack* stack) {
+ return [=](Stack& stack) {
pybind11::gil_scoped_acquire gil;
py::tuple py_inputs(op->cconv.size());
size_t i = 0;
drop(stack, num_inputs);
try {
py::object py_output(func(*py_inputs));
- stack->push_back(returnToIValue(op->output()->type(), py_output));
+ stack.push_back(returnToIValue(op->output()->type(), py_output));
} catch (py::error_already_set& e) {
throw std::runtime_error(e.what());
}
num_outputs(this->grad.f->outputs().size()) {}
// XXX: keep in mind that stack can be larger than the inputs we need!
- void operator()(Stack* stack) const {
+ void operator()(Stack& stack) const {
auto grad_fn = std::make_shared<DifferentiableGraphBackward>(
grad_executor,
grad.df_input_vjps.size(),
captureInputs(*grad_fn, inputs);
}
- detachVariables(*stack);
+ detachVariables(stack);
if (IsNewExecutorEnabled()) {
ExecutionPlan plan =
- f_ptr->getPlanFor(*stack, GraphExecutor::getDefaultNumBailOuts());
- InterpreterState(plan.code).run(*stack);
+ f_ptr->getPlanFor(stack, GraphExecutor::getDefaultNumBailOuts());
+ InterpreterState(plan.code).run(stack);
} else {
- InterpreterState(legacy_f).run(*stack);
+ InterpreterState(legacy_f).run(stack);
}
{
// drop the temporary outputs so that we return the same number of
// outputs as if we were not also calculating gradient
const size_t num_temporary_outputs = num_outputs - grad.f_real_outputs;
- stack->erase(stack->end() - num_temporary_outputs, stack->end());
+ stack.erase(stack.end() - num_temporary_outputs, stack.end());
}
}
}
case INST(OP): {
INST_GUARD;
- frame.function->operator_table_[inst.X](&stack);
+ frame.function->operator_table_[inst.X](stack);
}
INST_NEXT;
case INST(OPN): {
INST_GUARD;
stack.push_back(inst.N);
- frame.function->operator_table_[inst.X](&stack);
+ frame.function->operator_table_[inst.X](stack);
}
INST_NEXT;
case INST(LOAD): {
namespace {
Operator createOperatorFromC10(const c10::OperatorHandle& op) {
- return Operator(op, [op](Stack* stack) { op.callBoxed(stack); });
+ return Operator(op, [op](Stack& stack) { op.callBoxed(stack); });
}
class RegistrationListener final : public c10::OpRegistrationListener {
RegisterOperators const reg({
Operator(
"cuda::current_stream.device(Device? device) -> __torch__.torch.classes.cuda.Stream",
- [](Stack* stack) {
+ [](Stack& stack) {
auto device = pop(stack).toOptional<c10::Device>();
c10::DeviceIndex device_index = device.has_value()
? device->index()
aliasAnalysisFromSchema()),
Operator(
"cuda::current_stream.int(int? val) -> __torch__.torch.classes.cuda.Stream",
- [](Stack* stack) {
+ [](Stack& stack) {
auto idx = pop(stack).toOptional<int64_t>();
c10::DeviceIndex device_index = idx.has_value()
? static_cast<c10::DeviceIndex>(idx.value())
aliasAnalysisFromSchema()),
Operator(
"cuda::default_stream.device(Device? device) -> __torch__.torch.classes.cuda.Stream",
- [](Stack* stack) {
+ [](Stack& stack) {
auto device = pop(stack).toOptional<c10::Device>();
c10::DeviceIndex device_index = device.has_value()
? device->index()
aliasAnalysisFromSchema()),
Operator(
"cuda::default_stream.int(int? val) -> __torch__.torch.classes.cuda.Stream",
- [](Stack* stack) {
+ [](Stack& stack) {
auto idx = pop(stack).toOptional<int64_t>();
c10::DeviceIndex device_index = idx.has_value()
? static_cast<c10::DeviceIndex>(idx.value())
aliasAnalysisFromSchema()),
Operator(
"cuda::_current_device() -> int",
- [](Stack* stack) {
+ [](Stack& stack) {
auto v = c10::cuda::current_device();
push(stack, static_cast<int>(v));
},
aliasAnalysisFromSchema()),
Operator(
"cuda::_set_device(int64_t val) -> ()",
- [](Stack* stack) {
+ [](Stack& stack) {
int64_t idx = -1;
pop(stack, idx);
c10::cuda::set_device(static_cast<c10::DeviceIndex>(idx));
aliasAnalysisFromSchema()),
Operator(
"cuda::device_index(Device device) -> int",
- [](Stack* stack) {
+ [](Stack& stack) {
auto device = pop(stack);
auto idx = device.toDevice().index();
push(stack, idx);
aliasAnalysisFromSchema()),
Operator(
"cuda::device_count() -> int",
- [](Stack* stack) { push(stack, at::cuda::device_count()); },
+ [](Stack& stack) { push(stack, at::cuda::device_count()); },
aliasAnalysisFromSchema()),
Operator(
"cuda::set_stream(__torch__.torch.classes.cuda.Stream stream) -> ()",
- [](Stack* stack) {
+ [](Stack& stack) {
auto v = pop(stack);
auto s = v.toCustomClass<torch::jit::CUDAStream>();
auto stream_device_idx = static_cast<int64_t>(s->device_index());
aliasAnalysisFromSchema()),
Operator(
"cuda::synchronize() -> ()",
- [](Stack* stack) { c10::cuda::device_synchronize(); },
+ [](Stack& stack) { c10::cuda::device_synchronize(); },
aliasAnalysisFromSchema()),
Operator(
"cuda::synchronize.device(Device? device) -> ()",
- [](Stack* stack) {
+ [](Stack& stack) {
auto device = pop(stack).toOptional<c10::Device>();
c10::DeviceIndex device_index = device.has_value()
? device->index()
aliasAnalysisFromSchema()),
Operator(
"cuda::synchronize.int(int? val) -> ()",
- [](Stack* stack) {
+ [](Stack& stack) {
auto idx = pop(stack).toOptional<int64_t>();
c10::DeviceIndex device_index = idx.has_value()
? static_cast<c10::DeviceIndex>(idx.value())
// prepare the rpc input arguments and call the C++ impls
void prepare_and_call_rpc_op(
- Stack* stack,
+ Stack& stack,
int num_inputs,
const std::string& rpc_op) {
// Get inputs from the stack.
- auto stackIter = stack->end() - num_inputs;
+ auto stackIter = stack.end() - num_inputs;
auto& dstWorkerIValue = *stackIter++;
auto& qualifiedNameIValue = *stackIter++;
IValue emptyTuple(c10::ivalue::Tuple::create({}));
rpcTimeout);
// Push output to the stack.
drop(stack, num_inputs);
- stack->emplace_back(std::move(futureIValuePtr));
+ stack.emplace_back(std::move(futureIValuePtr));
} else if (rpc_op == "rpc_sync") {
// Send RPC request.
auto futureIValuePtr = dist_rpc::rpcTorchscript(
auto res = futureIValuePtr->value();
// Push output to the stack.
drop(stack, num_inputs);
- stack->emplace_back(std::move(res));
+ stack.emplace_back(std::move(res));
}
} else if (rpc_op == "rpc_remote") {
auto rrefPtr = dist_rpc::remoteTorchscript(
rpcTimeout);
// Push output to the stack.
drop(stack, num_inputs);
- stack->emplace_back(
+ stack.emplace_back(
c10::static_intrusive_pointer_cast<c10::RRefInterface>(rrefPtr));
} else {
throw std::runtime_error(
fmt::format(
"aten::to_here(RRef(t) self, float timeout = {}) -> t(*)",
torch::distributed::rpc::kDefaultRpcTimeoutSeconds),
- [](Stack* stack) {
+ [](Stack& stack) {
auto timeout = pop(stack).toDouble();
auto rref = pop(stack).toRRef();
IValue res;
aliasAnalysisFromSchema()),
Operator(
"aten::local_value(RRef(t) self) -> t(*)",
- [](Stack* stack) {
+ [](Stack& stack) {
auto rref = pop(stack).toRRef();
TORCH_CHECK(
rref->isOwner(),
aliasAnalysisFromSchema()),
Operator(
"aten::is_owner(RRef(t) self) -> bool",
- [](Stack* stack) {
+ [](Stack& stack) {
auto rref = pop(stack).toRRef();
push(stack, rref->isOwner());
},
aliasAnalysisFromSchema()),
Operator(
"aten::owner(RRef(t) self) -> __torch__.torch.classes.dist_rpc.WorkerInfo",
- [](Stack* stack) {
+ [](Stack& stack) {
auto rref = pop(stack).toRRef();
push(
stack,
aliasAnalysisFromSchema()),
Operator(
"aten::owner_name(RRef(t) self) -> str",
- [](Stack* stack) {
+ [](Stack& stack) {
auto rref = pop(stack).toRRef();
push(stack, rref->ownerName());
},
aliasAnalysisFromSchema()),
Operator(
"aten::confirmed_by_owner(RRef(t) self) -> bool",
- [](Stack* stack) {
+ [](Stack& stack) {
auto rref = pop(stack).toRRef();
push(stack, rref->confirmedByOwner());
},
aliasAnalysisFromSchema()),
Operator(
"aten::dist_backward(int context_id, Tensor[] roots, bool retain_graph=False) -> ()",
- [](Stack* stack) {
+ [](Stack& stack) {
bool retain_graph = pop(stack).toBool();
auto roots_list = pop(stack).toTensorList();
int64_t context_id = pop(stack).toInt();
prim::rpc_sync,
[](const Node* node) -> Operation {
int num_inputs = node->inputs().size();
- return [num_inputs](Stack* stack) {
+ return [num_inputs](Stack& stack) {
prepare_and_call_rpc_op(stack, num_inputs, "rpc_sync");
};
},
prim::rpc_remote,
[](const Node* node) -> Operation {
int num_inputs = node->inputs().size();
- return [num_inputs](Stack* stack) {
+ return [num_inputs](Stack& stack) {
prepare_and_call_rpc_op(stack, num_inputs, "rpc_remote");
};
},
prim::rpc_async,
[](const Node* node) -> Operation {
int num_inputs = node->inputs().size();
- return [num_inputs](Stack* stack) {
+ return [num_inputs](Stack& stack) {
prepare_and_call_rpc_op(stack, num_inputs, "rpc_async");
};
},
}
template <>
-void listIndex<at::Tensor>(Stack* stack) {
+void listIndex<at::Tensor>(Stack& stack) {
at::Tensor elem = pop(stack).to<at::Tensor>();
c10::List<at::Tensor> list = pop(stack).to<c10::List<at::Tensor>>();
}
template <>
-void listCount<at::Tensor>(Stack* stack) {
+void listCount<at::Tensor>(Stack& stack) {
at::Tensor elem = pop(stack).to<at::Tensor>();
c10::List<at::Tensor> list = pop(stack).to<c10::List<at::Tensor>>();
}
template <>
-void listEq<at::Tensor>(Stack* stack) {
+void listEq<at::Tensor>(Stack& stack) {
c10::List<at::Tensor> b = pop(stack).to<c10::List<at::Tensor>>();
c10::List<at::Tensor> a = pop(stack).to<c10::List<at::Tensor>>();
push(stack, tensor_list_equal(a, b));
}
template <>
-void listNe<at::Tensor>(Stack* stack) {
+void listNe<at::Tensor>(Stack& stack) {
c10::List<at::Tensor> b = pop(stack).to<c10::List<at::Tensor>>();
c10::List<at::Tensor> a = pop(stack).to<c10::List<at::Tensor>>();
push(stack, !tensor_list_equal(a, b));
}
template <>
-void listSort<at::Tensor>(Stack* stack) {
+void listSort<at::Tensor>(Stack& stack) {
bool reverse = pop(stack).toBool();
c10::List<at::Tensor> list = pop(stack).toTensorList();
std::sort(
}
template <>
-void listCopyAndSort<at::Tensor>(Stack* stack) {
+void listCopyAndSort<at::Tensor>(Stack& stack) {
c10::List<at::Tensor> list = pop(stack).toTensorList();
auto list_copied = list.copy();
std::sort(
}
template <>
-void listRemove<at::Tensor>(Stack* stack) {
+void listRemove<at::Tensor>(Stack& stack) {
at::Tensor elem = pop(stack).to<at::Tensor>();
c10::List<at::Tensor> list = pop(stack).to<c10::List<at::Tensor>>();
return idx;
}
-void listAppend(Stack* stack) {
+void listAppend(Stack& stack) {
IValue el = pop(stack).to<IValue>();
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
push(stack, std::move(list));
}
-void listReverse(Stack* stack) {
+void listReverse(Stack& stack) {
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
std::reverse(list.begin(), list.end());
}
-void listPopImpl(Stack* stack, const char* empty_message) {
+void listPopImpl(Stack& stack, const char* empty_message) {
int64_t idx = pop(stack).to<int64_t>();
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
list.erase(list.begin() + normalized_idx);
}
-void listPop(Stack* stack) {
+void listPop(Stack& stack) {
return listPopImpl(stack, "pop from empty list");
}
-void listClear(Stack* stack) {
+void listClear(Stack& stack) {
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
list.clear();
}
-void listDelete(Stack* stack) {
+void listDelete(Stack& stack) {
listPopImpl(stack, "pop index out of range");
pop(stack);
}
-void listInsert(Stack* stack) {
+void listInsert(Stack& stack) {
IValue elem = pop(stack).to<IValue>();
int64_t idx = pop(stack).to<int64_t>();
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
}
}
-void listExtend(Stack* stack) {
+void listExtend(Stack& stack) {
c10::List<IValue> b = pop(stack).to<c10::List<IValue>>();
c10::List<IValue> a = pop(stack).to<c10::List<IValue>>();
}
}
-void listCopy(Stack* stack) {
+void listCopy(Stack& stack) {
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
push(stack, list.copy());
}
-void listSelect(Stack* stack) {
+void listSelect(Stack& stack) {
int64_t idx = pop(stack).to<int64_t>();
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
push(stack, std::move(element));
}
-void listLen(Stack* stack) {
+void listLen(Stack& stack) {
c10::List<IValue> a = pop(stack).to<c10::List<IValue>>();
const int64_t size = a.size();
push(stack, size);
}
-void listList(Stack* stack) {
+void listList(Stack& stack) {
c10::List<IValue> a = pop(stack).to<c10::List<IValue>>();
push(stack, a.copy());
}
-void listAdd(Stack* stack) {
+void listAdd(Stack& stack) {
c10::List<IValue> b = pop(stack).to<c10::List<IValue>>();
c10::List<IValue> a = pop(stack).to<c10::List<IValue>>();
push(stack, std::move(ret));
}
-void listInplaceAdd(Stack* stack) {
+void listInplaceAdd(Stack& stack) {
c10::List<IValue> b = pop(stack).to<List<IValue>>();
c10::List<IValue> a = pop(stack).to<List<IValue>>();
a.append(std::move(b));
push(stack, std::move(a));
}
-void listMulIntLeftInPlace(Stack* stack) {
+void listMulIntLeftInPlace(Stack& stack) {
int64_t n = pop(stack).to<int64_t>();
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
if (n <= 0) {
push(stack, std::move(list));
}
-void listMulIntLeft(Stack* stack) {
+void listMulIntLeft(Stack& stack) {
int64_t n = pop(stack).to<int64_t>();
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
push(stack, std::move(ret));
}
-void listMulIntRight(Stack* stack) {
+void listMulIntRight(Stack& stack) {
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
int64_t n = pop(stack).to<int64_t>();
push(stack, std::move(ret));
}
-void listSlice(Stack* stack) {
+void listSlice(Stack& stack) {
auto step_val = pop(stack);
auto end_val = pop(stack);
auto start_val = pop(stack);
push(stack, std::move(sliced_list));
}
-void listSetItem(Stack* stack) {
+void listSetItem(Stack& stack) {
IValue value = pop(stack).to<IValue>();
int64_t idx = pop(stack).to<int64_t>();
c10::List<IValue> list = pop(stack).to<c10::List<IValue>>();
template <>
c10::impl::GenericList make_result_list<IValue>(const TypePtr& elemType);
// No-op operation: consumes nothing from the stack and pushes nothing.
-inline void noop(Stack* n) {}
+inline void noop(Stack& n) {}
// As described in https://docs.python.org/3/library/functions.html#round
// When a number is exactly halfway between two integers, python builtin round
list.set(normalized_idx, std::forward<T>(value));
}
-void listAppend(Stack* stack);
+void listAppend(Stack& stack);
-void listReverse(Stack* stack);
+void listReverse(Stack& stack);
template <typename T>
-void minList(Stack* stack) {
+void minList(Stack& stack) {
c10::List<T> a = pop(stack).to<c10::List<T>>();
c10::List<T> b = pop(stack).to<c10::List<T>>();
}
template <typename T>
-void maxList(Stack* stack) {
+void maxList(Stack& stack) {
c10::List<T> a = pop(stack).to<c10::List<T>>();
c10::List<T> b = pop(stack).to<c10::List<T>>();
push(stack, b.size() > a.size() ? b : a);
}
-void listPopImpl(Stack* stack, const char* empty_message);
+void listPopImpl(Stack& stack, const char* empty_message);
-void listPop(Stack* stack);
+void listPop(Stack& stack);
-void listClear(Stack* stack);
+void listClear(Stack& stack);
-void listDelete(Stack* stack);
+void listDelete(Stack& stack);
-void listInsert(Stack* stack);
+void listInsert(Stack& stack);
template <typename T>
-void listRemove(Stack* stack) {
+void listRemove(Stack& stack) {
T elem = pop(stack).to<T>();
c10::List<T> list = pop(stack).to<c10::List<T>>();
}
template <typename T>
-void listMin(Stack* stack) {
+void listMin(Stack& stack) {
c10::List<T> list = pop(stack).to<c10::List<T>>();
size_t list_size = list.size();
if (list_size == 0) {
min_elem = elem < min_elem ? elem : min_elem;
}
- stack->push_back(min_elem);
+ stack.push_back(min_elem);
}
template <typename T>
-void listMax(Stack* stack) {
+void listMax(Stack& stack) {
c10::List<T> list = pop(stack).to<c10::List<T>>();
size_t list_size = list.size();
if (list_size == 0) {
max_elem = elem > max_elem ? elem : max_elem;
}
- stack->push_back(max_elem);
+ stack.push_back(max_elem);
}
template <>
-void listRemove<at::Tensor>(Stack* stack);
+void listRemove<at::Tensor>(Stack& stack);
template <typename T>
-void listIndex(Stack* stack) {
+void listIndex(Stack& stack) {
T elem = pop(stack).to<T>();
c10::List<T> list = pop(stack).to<c10::List<T>>();
}
template <>
-void listIndex<at::Tensor>(Stack* stack);
+void listIndex<at::Tensor>(Stack& stack);
template <typename T>
-void listCount(Stack* stack) {
+void listCount(Stack& stack) {
T elem = pop(stack).to<T>();
c10::List<T> list = pop(stack).to<c10::List<T>>();
}
template <>
-void listCount<at::Tensor>(Stack* stack);
+void listCount<at::Tensor>(Stack& stack);
-void listExtend(Stack* stack);
+void listExtend(Stack& stack);
-void listCopy(Stack* stack);
+void listCopy(Stack& stack);
-void listSelect(Stack* stack);
+void listSelect(Stack& stack);
-void listLen(Stack* stack);
+void listLen(Stack& stack);
template <typename T>
-void listEq(Stack* stack) {
+void listEq(Stack& stack) {
c10::List<T> b = pop(stack).to<c10::List<T>>();
c10::List<T> a = pop(stack).to<c10::List<T>>();
push(stack, a == b);
}
template <typename T>
-void listNe(Stack* stack) {
+void listNe(Stack& stack) {
c10::List<T> b = pop(stack).to<c10::List<T>>();
c10::List<T> a = pop(stack).to<c10::List<T>>();
push(stack, a != b);
// Specialization for at::Tensor, since it doesn't define operator==
template <>
-void listEq<at::Tensor>(Stack* stack);
+void listEq<at::Tensor>(Stack& stack);
// Specialization for at::Tensor, since it doesn't define operator==
template <>
-void listNe<at::Tensor>(Stack* stack);
+void listNe<at::Tensor>(Stack& stack);
-void listList(Stack* stack);
+void listList(Stack& stack);
template <typename T>
-void listContains(Stack* stack) {
+void listContains(Stack& stack) {
auto key = pop(stack).to<T>();
auto list = pop(stack).to<c10::List<T>>();
// NOLINTNEXTLINE(performance-implicit-conversion-in-loop)
push(stack, false);
}
-void listAdd(Stack* stack);
+void listAdd(Stack& stack);
-void listInplaceAdd(Stack* stack);
+void listInplaceAdd(Stack& stack);
-void listMulIntLeftInPlace(Stack* stack);
+void listMulIntLeftInPlace(Stack& stack);
-void listMulIntLeft(Stack* stack);
+void listMulIntLeft(Stack& stack);
-void listMulIntRight(Stack* stack);
+void listMulIntRight(Stack& stack);
-void listSlice(Stack* stack);
+void listSlice(Stack& stack);
template <typename T>
-void listSort(Stack* stack) {
+void listSort(Stack& stack) {
bool reverse = pop(stack).toBool();
c10::List<T> list = pop(stack).to<c10::List<T>>();
std::sort(list.begin(), list.end(), [reverse](const T& a, const T& b) {
// Specialization for at::Tensor
template <>
-void listSort<at::Tensor>(Stack* stack);
+void listSort<at::Tensor>(Stack& stack);
template <typename T>
-void listCopyAndSort(Stack* stack) {
+void listCopyAndSort(Stack& stack) {
c10::List<T> list = pop(stack).to<c10::List<T>>();
auto list_copied = list.copy();
std::sort(list_copied.begin(), list_copied.end(), [](const T& a, const T& b) {
// Specialization for at::Tensor
template <>
-void listCopyAndSort<at::Tensor>(Stack* stack);
+void listCopyAndSort<at::Tensor>(Stack& stack);
-void listSetItem(Stack* stack);
+void listSetItem(Stack& stack);
struct OperatorGeneratorArgs {
const char* schema_str;
bool isOperationCreator;
union {
- void (*operation)(Stack*);
+ void (*operation)(Stack&);
OperationCreator operationCreator;
};
AliasAnalysisKind aliasAnalysis;
explicit constexpr OperatorGeneratorArgs(
torch::detail::SelectiveStr<true> schema_str,
- void (*op)(Stack*),
+ void (*op)(Stack&),
AliasAnalysisKind aa)
: schema_str(schema_str),
isOperationCreator(false),
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op \
".int_int(int a, int b) -> " #int_float_result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
int64_t a, b; \
pop(stack, a, b); \
push(stack, op); \
TORCH_SELECTIVE_SCHEMA( \
#aten_op \
".float_float(float a, float b) -> " #int_float_result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
double a, b; \
pop(stack, a, b); \
push(stack, op); \
TORCH_SELECTIVE_SCHEMA( \
#aten_op \
".complex_complex(complex a, complex b) -> " #complex_result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
c10::complex<double> a, b; \
pop(stack, a, b); \
push(stack, op); \
#define DEFINE_GENERIC_OP(aten_op, int_op, float_op, int_result, float_result) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a, int b) -> " #int_result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
int64_t a, b; \
pop(stack, a, b); \
push(stack, int_op); \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA( \
#aten_op ".float(float a, float b) -> " #float_result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
double a, b; \
pop(stack, a, b); \
push(stack, float_op); \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op \
".int_float(int a, float b) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
int64_t a; \
double b; \
pop(stack, a, b); \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op \
".float_int(float a, int b) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
double a; \
int64_t b; \
pop(stack, a, b); \
#define DEFINE_INT_OP(aten_op, op) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a, int b) -> int"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
int64_t a, b; \
pop(stack, a, b); \
push(stack, op); /* NOLINT(hicpp-signed-bitwise) */ \
#define DEFINE_STR_CMP_OP(aten_op, op) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op ".str(str a, str b) -> bool"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
auto b = pop(stack).toStringRef(); \
auto a = pop(stack).toStringRef(); \
push(stack, op); \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op string_val \
"(Scalar a, Scalar b) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
IValue x, y; \
pop(stack, x, y); \
if (x.isDouble()) { \
#define DEFINE_UNARY_INT_OP(aten_op, op, result) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
int64_t a; \
pop(stack, a); \
push(stack, op); \
#define DEFINE_UNARY_FLOAT_OP(aten_op, op, result) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op ".float(float a) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
double a; \
pop(stack, a); \
push(stack, op); \
DEFINE_UNARY_FLOAT_OP(aten_op, op, float_result), \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op ".Scalar(Scalar a) -> Scalar"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
IValue x; \
pop(stack, x); \
if (x.isDouble()) { \
#define DEFINE_BOOL_OP(aten_op, op) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op ".bool(bool a, bool b) -> bool"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
bool a, b; \
pop(stack, a, b); \
push(stack, op); \
#define DEFINE_STRING_OP(op_name, string_op, result) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#op_name ".str(str a, str b) ->" #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
auto b = pop(stack).toStringRef(); \
auto a = pop(stack).toStringRef(); \
push(stack, string_op); \
#define DEFINE_UNARY_COMPLEX_OP(aten_op, op, result) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op ".complex(complex a) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
c10::complex<double> a; \
pop(stack, a); \
push(stack, op); \
DEFINE_UNARY_COMPLEX_OP(aten_op, op, complex_result), \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op ".Scalar(Scalar a) -> Scalar"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
IValue x; \
pop(stack, x); \
if (x.isDouble()) { \
complex_result) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a, int b) -> " #int_result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
int64_t a, b; \
pop(stack, a, b); \
push(stack, int_op); \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA( \
#aten_op ".complex(complex a, complex b) -> " #complex_result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
c10::complex<double> a, b; \
pop(stack, a, b); \
push(stack, complex_op); \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA( \
#aten_op ".float(float a, float b) -> " #float_result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
double a, b; \
pop(stack, a, b); \
push(stack, float_op); \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op \
".int_complex(int a, complex b) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
int64_t a; \
c10::complex<double> b; \
pop(stack, a, b); \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA( \
#aten_op ".complex_int(complex a, int b) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
c10::complex<double> a; \
int64_t b; \
pop(stack, a, b); \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA( \
#aten_op ".float_complex(float a, complex b) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
double a; \
c10::complex<double> b; \
pop(stack, a, b); \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA( \
#aten_op ".complex_float(complex a, float b) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
c10::complex<double> a; \
double b; \
pop(stack, a, b); \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op string_val \
"(Scalar a, Scalar b) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
IValue x, y; \
pop(stack, x, y); \
if (x.isComplexDouble()) { \
aten_op, int_op, float_op, complex_op, result) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#aten_op "(Scalar a, Scalar b) -> " #result), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
IValue x, y; \
pop(stack, x, y); \
if (x.isComplexDouble()) { \
static const OperatorGeneratorArgs opGenArgs[] = {
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::str(t elem) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
std::stringstream ss;
ss << pop(stack);
push(stack, ss.str());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::list(str t) -> str[]"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto str = pop(stack).toStringRef();
c10::List<std::string> chars;
chars.reserve(str.size());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::cpu(Tensor(a) self) -> Tensor(a|b)"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.cpu());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::layout(Tensor a) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.layout());
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::__range_length(int lo, int hi, int step) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t lo, hi, step;
pop(stack, lo, hi, step);
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::__derive_index(int index, int start, int step) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t index, start, step;
pop(stack, index, start, step);
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::TupleUnpack(Any tup) -> ..."),
- [](Stack* stack) { tupleUnpack(*stack); },
+ [](Stack& stack) { tupleUnpack(stack); },
aliasAnalysisSpecialCase()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::unchecked_cast(t x) -> t"),
aliasAnalysisSpecialCase()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::IntImplicit(Tensor a) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
checkImplicitTensorToNum(a, /*to int*/ true);
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::ComplexImplicit(Tensor a) -> complex"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
checkImplicitTensorToNum(a, /*to int*/ false);
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::FloatImplicit(Tensor a) -> float"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
checkImplicitTensorToNum(a, /*to int*/ false);
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::ScalarImplicit(Tensor a) -> Scalar"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
checkImplicitTensorToNum(a, /*to int*/ false);
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Bool.Tensor(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_nonzero());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Bool.int(int a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t i;
pop(stack, i);
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Bool.float(float a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double d;
pop(stack, d);
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Int.Tensor(Tensor a) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.item<int64_t>());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Int.bool(bool a) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool b;
pop(stack, b);
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Int.float(float a) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double d;
pop(stack, d);
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Int.Scalar(Scalar a) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue scalar;
pop(stack, scalar);
if (scalar.isInt()) {
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Int.str(str a) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto s = pop(stack).toString();
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
std::string::size_type sz;
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Float.Tensor(Tensor a) -> float"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.item<double>());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Float.Scalar(Scalar a) -> float"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue scalar;
pop(stack, scalar);
if (scalar.isDouble()) {
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Float.int(int a) -> float"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t i;
pop(stack, i);
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Float.bool(bool a) -> float"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool b;
pop(stack, b);
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Float.str(str a) -> float"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto s = pop(stack).toString();
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
std::string::size_type sz;
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Complex.Scalar(Scalar a) -> complex"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue scalar;
pop(stack, scalar);
if (scalar.isComplexDouble()) {
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::Complex.Tensor_Tensor(Tensor a, Tensor b) -> complex"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a, b;
pop(stack, a, b);
push(stack, c10::complex<double>(a.item<double>(), b.item<double>()));
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::format(str self, ...) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
size_t num_inputs = pop(stack).toInt();
- format(*stack, num_inputs);
+ format(stack, num_inputs);
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::einsum.sublist(Tensor a, ...) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
size_t num_inputs = pop(stack).toInt();
- einsum(*stack, num_inputs);
+ einsum(stack, num_inputs);
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::NumToTensor.Scalar(Scalar a) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Scalar s;
pop(stack, s);
push(stack, at::scalar_to_tensor(s));
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::RaiseException(str msg) -> ()"),
- [](Stack* stack) { throw JITException(pop(stack).toStringRef()); },
+ [](Stack& stack) { throw JITException(pop(stack).toStringRef()); },
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::Size(int[] sizes) -> int[]"),
- [](Stack* stack) {},
+ [](Stack& stack) {},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::size(Tensor self) -> int[]"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto t = std::move(pop(stack)).toTensor();
pack(stack, t.sizes().vec());
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::EnumName(AnyEnumType enum) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue e = pop(stack);
push(stack, e.toEnumHolder()->name());
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::EnumValue.int(AnyEnumType enum) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue e = pop(stack);
push(stack, e.toEnumHolder()->value());
},
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"prim::EnumValue.float(AnyEnumType enum) -> float"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue e = pop(stack);
push(stack, e.toEnumHolder()->value());
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::EnumValue.str(AnyEnumType enum) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue e = pop(stack);
push(stack, e.toEnumHolder()->value());
},
// note the compiler knows to type TupleIndex more accurately than it
// is listed here.
TORCH_SELECTIVE_SCHEMA("prim::TupleIndex(Any tup, int i) -> Any"),
- [](Stack* stack) {
+ [](Stack& stack) {
int64_t index = pop(stack).toInt();
auto tuple = pop(stack).toTuple();
auto norm_index = normalizeIndex(index, tuple->elements().size());
norm_index > static_cast<int64_t>(tuple->elements().size())) {
throw std::out_of_range("Tuple list index out of range");
}
- stack->emplace_back(tuple->elements()[norm_index]);
+ stack.emplace_back(tuple->elements()[norm_index]);
},
aliasAnalysisSpecialCase()),
OperatorGeneratorArgs(
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::device(Tensor a) -> Device"),
- [](Stack* stack) { push(stack, pop(stack).toTensor().device()); },
+ [](Stack& stack) { push(stack, pop(stack).toTensor().device()); },
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::dtype(Tensor a) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, static_cast<int64_t>(a.scalar_type()));
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::__not__(bool self) -> bool"),
- [](Stack* stack) { push(stack, !pop(stack).toBool()); },
+ [](Stack& stack) { push(stack, !pop(stack).toBool()); },
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::__is__(t1 self, t2 obj) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue self, obj;
pop(stack, self, obj);
push(stack, self.is(obj));
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::__isnot__(t1 self, t2 obj) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue self, obj;
pop(stack, self, obj);
push(stack, !self.is(obj));
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::element_size(Tensor self) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor arg = pop(stack).toTensor();
push(stack, arg.element_size());
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::numel(Tensor self) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor arg = pop(stack).toTensor();
push(stack, arg.numel());
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::dim(Tensor self) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor arg = pop(stack).toTensor();
push(stack, arg.dim());
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::get_device(Tensor self) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
RECORD_FUNCTION("get_device", std::vector<c10::IValue>());
auto result =
at::get_device((std::move(peek(stack, 0, 1))).toTensor());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::storage_offset(Tensor self) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
RECORD_FUNCTION("storage_offset", std::vector<c10::IValue>());
auto result =
((std::move(peek(stack, 0, 1))).toTensor()).storage_offset();
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::is_contiguous(Tensor self) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
RECORD_FUNCTION("is_contiguous", std::vector<c10::IValue>());
auto result =
((std::move(peek(stack, 0, 1))).toTensor()).is_contiguous();
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::eq.device(Device a, Device b) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto a = pop(stack).toDevice();
auto b = pop(stack).toDevice();
push(stack, a == b);
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::ne.device(Device a, Device b) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto a = pop(stack).toDevice();
auto b = pop(stack).toDevice();
push(stack, a != b);
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::eq.bool(bool a, bool b) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto a = pop(stack);
auto b = pop(stack);
push(stack, a == b);
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::ne.bool(bool a, bool b) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto a = pop(stack);
auto b = pop(stack);
push(stack, a != b);
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::Uninitialized() -> Any"),
- [](Stack* stack) { push(stack, IValue::uninitialized()); },
+ [](Stack& stack) { push(stack, IValue::uninitialized()); },
aliasAnalysisSpecialCase()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::Print(...) -> ()"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto num_inputs = pop(stack).toInt();
std::stringstream ss;
bool first = true;
// prim::VarConcat(Tensors..., dim) -> Tensor
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::VarConcat(...) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto num_inputs = pop(stack).toInt();
auto dim = pop(stack).toInt();
std::vector<at::Tensor> inputs(num_inputs - 1);
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::VarStack(...) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto num_inputs = pop(stack).toInt();
auto dim = pop(stack).toInt();
std::vector<at::Tensor> inputs(num_inputs - 1);
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::eq.enum(AnyEnumType a, AnyEnumType b) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue x = pop(stack);
IValue y = pop(stack);
push(stack, x == y);
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::ne.enum(AnyEnumType a, AnyEnumType b) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue x = pop(stack);
IValue y = pop(stack);
push(stack, x != y);
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::dequantize.tensor(Tensor qtensor) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor qtensor;
pop(stack, qtensor);
push(stack, at::dequantize(qtensor));
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::dequantize.list(Tensor[] qtensors) -> Tensor[]"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto qtensors = pop(stack).toTensorVector();
push(stack, at::dequantize(qtensors));
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::dequantize.any(Any tensors) -> Any"),
- [](Stack* stack) { dequantize(*stack); },
+ [](Stack& stack) { dequantize(stack); },
aliasAnalysisFromSchema()),
DEFINE_UNARY_OP_WITH_COMPLEX(aten::log, std::log(a), float, float),
DEFINE_STRING_OP(aten::add, a + b, str),
float),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::pow.int_to_int(int a, int b) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t a, b;
pop(stack, a, b);
DEFINE_BINARY_OP(prim::max, a > b ? a : b),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::type(Device self) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto d = pop(stack);
push(
stack, DeviceTypeName(d.toDevice().type(), /* lower_case=*/true));
// tensor length op (size of 1st dimension)
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::len.Tensor(Tensor t) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor t = pop(stack).toTensor();
if (t.dim() == 0) {
AT_ERROR("len() of a 0-d tensor");
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::ord(str string) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto string = pop(stack).toStringRef();
TORCH_CHECK(
string.size() == 1,
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::lower(str self) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto string = pop(stack).toStringRef();
std::stringstream ss;
for (char c : string) {
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::len.str(str s) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto string = pop(stack).toStringRef();
push(stack, static_cast<int64_t>(string.size()));
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::dict() -> Dict(str, Tensor)"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto dict =
c10::impl::GenericDict(StringType::get(), TensorType::get());
push(stack, dict);
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::__getitem__.str(str s, int index) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto index = pop(stack).toInt();
auto string = pop(stack).toStringRef();
auto norm_index = normalizeIndex(index, string.size());
TORCH_SELECTIVE_SCHEMA("aten::copy_." #other_type \
"(Tensor(a!) self, " #other_type \
" other) -> Tensor(a!)"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
at::Tensor t; \
c_type other; \
pop(stack, t, other); \
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::backward(Tensor self, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()"),
- [](Stack* stack) {
+ [](Stack& stack) {
bool create_graph = pop(stack).toBool();
auto retain_graph = pop(stack).toOptional<bool>();
IValue gradient_ivalue = pop(stack);
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::index.Tensor_hacked_twin(Tensor self, Tensor[] indices) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto indices = pop(stack).to<List<c10::optional<at::Tensor>>>();
auto self = pop(stack).toTensor();
auto result = at::index(self, indices);
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::_index_put_impl_.hacked_twin(Tensor(a!) self, Tensor[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto unsafe = pop(stack).toBool();
auto accumulate = pop(stack).toBool();
auto values = pop(stack).toTensor();
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::index_put_.hacked_twin(Tensor(a!) self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto accumulate = pop(stack).toBool();
auto values = pop(stack).toTensor();
auto indices = pop(stack).to<List<c10::optional<at::Tensor>>>();
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::index_put.hacked_twin(Tensor self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto accumulate = pop(stack).toBool();
auto values = pop(stack).toTensor();
auto indices = pop(stack).to<List<c10::optional<at::Tensor>>>();
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::to.prim_Device(Tensor(a) self, Device? device, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool non_blocking;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::to.prim_dtype(Tensor(a) self, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool non_blocking;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::is_cuda(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_cuda());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::is_xpu(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_xpu());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::data(Tensor(a) a) -> Tensor(a)"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, autograd::Variable(a).variable_data());
#define DEFINE_STRING_IS_OP(op_name, char_op) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#op_name "(str self) -> bool"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
auto string = pop(stack).toStringRef(); \
push( \
stack, \
#define DEFINE_STRING_CHAR_MAP_OP(op_name, char_op) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#op_name "(str self) -> str"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
auto string = pop(stack).toStringRef(); \
std::stringstream ss; \
for (char c : string) { \
// operator below is intended to be as close to the Python
// implementation in torch/csrc/utils/tensor_list.cpp as possible.
[](const Node* /*node*/) -> Operation {
- return [](Stack* stack) {
+ return [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int elem_ty_val;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
return v;
})());
-void dictSetItem(Stack* stack) {
+// dict[key] = value. Pops, in order: value, key, dict. Mutates the dict
+// via insert_or_assign (overwrites an existing key); pushes nothing back.
+void dictSetItem(Stack& stack) {
  auto value = pop(stack);
  auto idx = pop(stack);
  auto dict = pop(stack).toGenericDict();
  dict.insert_or_assign(std::move(idx), std::move(value));
}
-void dictLen(Stack* stack) {
+// len(dict): pops the dict and pushes its element count as an int64.
+void dictLen(Stack& stack) {
  auto dict = pop(stack).toGenericDict();
  push(stack, int64_t(dict.size()));
}
-void dictValues(Stack* stack) {
+void dictValues(Stack& stack) {
auto dict = pop(stack).toGenericDict();
auto values = c10::impl::GenericList(dict.valueType());
for (const auto& entry : dict) {
push(stack, values);
}
-void dictKeys(Stack* stack) {
+void dictKeys(Stack& stack) {
auto dict = pop(stack).toGenericDict();
auto keys = c10::impl::GenericList(dict.keyType());
for (const auto& entry : dict) {
push(stack, keys);
}
-void dictIndex(Stack* stack) {
+void dictIndex(Stack& stack) {
auto key = pop(stack);
auto dict = pop(stack).toGenericDict();
auto value = dict.find(key);
}
template <bool has_default>
-void dictGet(Stack* stack) {
+void dictGet(Stack& stack) {
IValue default_value;
if (has_default) {
default_value = pop(stack);
// If the key is in the dict, return it. Else set it to the default value and
// return that.
-void dictSetDefault(Stack* stack) {
+void dictSetDefault(Stack& stack) {
auto default_value = pop(stack);
auto key = pop(stack);
auto dict = pop(stack).toGenericDict();
}
template <bool has_default>
-void dictPop(Stack* stack) {
+void dictPop(Stack& stack) {
IValue default_value;
if (has_default) {
default_value = pop(stack);
}
}
-void dictDelete(Stack* stack) {
+// del dict[key]: implemented on top of dictPop<false> (no default, so a
+// missing key raises there), then discards the value dictPop pushed,
+// since delete returns nothing.
+void dictDelete(Stack& stack) {
  dictPop<false>(stack);
  // pop pushes an item on the stack but delete does not, so get rid of it
  pop(stack);
}
-void dictPopItem(Stack* stack) {
+void dictPopItem(Stack& stack) {
auto dict = pop(stack).toGenericDict();
if (dict.size() == 0) {
AT_ERROR("popitem(): dictionary is empty");
push(stack, tuple);
}
-void dictContains(Stack* stack) {
+// key in dict: pops key, then dict; pushes a bool membership result.
+void dictContains(Stack& stack) {
  auto key = pop(stack);
  auto dict = pop(stack).toGenericDict();
  push(stack, dict.contains(key));
}
-void dictClear(Stack* stack) {
+// dict.clear(): pops the dict and clears it in place; pushes nothing.
+void dictClear(Stack& stack) {
  auto dict = pop(stack).toGenericDict();
  dict.clear();
}
-void dictUpdate(Stack* stack) {
+void dictUpdate(Stack& stack) {
auto to_add = pop(stack).toGenericDict();
auto dict = pop(stack).toGenericDict();
}
}
-void dictItems(Stack* stack) {
+void dictItems(Stack& stack) {
auto dict = pop(stack).toGenericDict();
auto key_type = dict.keyType();
auto value_type = dict.valueType();
push(stack, std::move(items));
}
-void dictCopy(Stack* stack) {
+// dict.copy(): pops the dict and pushes a copy made via c10::Dict::copy.
+void dictCopy(Stack& stack) {
  push(stack, pop(stack).toGenericDict().copy());
}
-void dictConstructFromList(Stack* stack) {
+void dictConstructFromList(Stack& stack) {
auto input_list = pop(stack);
auto list = input_list.toList();
auto tup_type = list.elementType()->expect<TupleType>();
static const OperatorGeneratorArgs opGenArgs1[] = {
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::rangelist(int n) -> int[]"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t n;
pop(stack, n);
// because all _to_tensor conversions have to have the same operator name
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::NumToTensor.bool(bool a) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool b;
pop(stack, b);
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::device(str a) -> Device"),
- [](Stack* stack) {
+ [](Stack& stack) {
push(stack, c10::Device(pop(stack).toStringRef()));
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::percentFormat(str self, ...) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
size_t num_inputs = pop(stack).toInt();
- percentFormat(*stack, num_inputs);
+ percentFormat(stack, num_inputs);
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::to.prim_other(Tensor(a) self, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor self;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool non_blocking;
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::requires_grad(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.requires_grad());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::grad(Tensor a) -> Tensor(*)"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.grad());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::is_sparse(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_sparse());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::is_sparse_csr(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_sparse_csr());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::is_mkldnn(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_mkldnn());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::is_mlc(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_mlc());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::is_vulkan(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_vulkan());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::is_quantized(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_quantized());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::is_meta(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_meta());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::is_ort(Tensor a) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.is_ort());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::name(Tensor a) -> str?"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
if (a.name() == "") {
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::index(Device self) -> int?"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto d = pop(stack).toDevice();
if (d.has_index()) {
push(stack, d.index());
// TODO return generator object when torchscript supports RNG
// first-class
TORCH_SELECTIVE_SCHEMA("aten::manual_seed(int seed) -> ()"),
- [](Stack* stack) { at::manual_seed(pop(stack).toInt()); },
+ [](Stack& stack) { at::manual_seed(pop(stack).toInt()); },
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::cuda(Tensor(a) self) -> Tensor(a|b)"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a;
pop(stack, a);
push(stack, a.cuda());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::AutogradZero() -> Tensor"),
- [](Stack* stack) { stack->emplace_back(at::Tensor()); },
+ [](Stack& stack) { stack.emplace_back(at::Tensor()); },
aliasAnalysisSpecialCase()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"prim::ReductionSizes(int[] size, int[] red_axes, bool keepdim = False) -> int[]"),
- [](Stack* stack) {
+ [](Stack& stack) {
bool keepdim = pop(stack).toBool();
c10::List<int64_t> axes = pop(stack).toIntList();
c10::List<int64_t> size = pop(stack).toIntList();
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::BroadcastSizes(...) -> int[]"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto num_inputs = pop(stack).toInt();
std::vector<int64_t> size;
size.reserve(8);
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::warn(str message, int stacklevel=2) -> ()"),
- [](Stack* stack) {
+ [](Stack& stack) {
TORCH_CHECK(false, "warn is implemented directly in the interpreter");
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"onnx::Reshape(Tensor input, Tensor shape) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor input, shape;
pop(stack, input, shape);
shape = shape.contiguous();
aliasAnalysisSpecialCase()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("onnx::Shape(Tensor t) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto t = pop(stack).toTensor();
at::IntArrayRef sizes = t.sizes();
auto sizes_tensor = torch::empty(
for (const auto i : c10::irange(sizes.size())) {
accessor[i] = sizes[i];
}
- stack->emplace_back(sizes_tensor);
+ stack.emplace_back(sizes_tensor);
},
aliasAnalysisSpecialCase()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::AutogradAnyNonZero(...) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto num_inputs = pop(stack).toInt();
bool result = false;
for (const IValue& v : last(stack, num_inputs)) {
}
}
drop(stack, num_inputs);
- stack->emplace_back(result);
+ stack.emplace_back(result);
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::AutogradAllZero(...) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto num_inputs = pop(stack).toInt();
bool result = true;
for (const IValue& v : last(stack, num_inputs)) {
}
}
drop(stack, num_inputs);
- stack->emplace_back(result);
+ stack.emplace_back(result);
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::AutogradAllNonZero(...) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto num_inputs = pop(stack).toInt();
bool result = true;
for (const IValue& v : last(stack, num_inputs)) {
}
}
drop(stack, num_inputs);
- stack->emplace_back(result);
+ stack.emplace_back(result);
},
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::AutogradAdd(Any a, Any b) -> Any"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor a, b;
pop(stack, a, b);
// NOLINTNEXTLINE(bugprone-branch-clone)
if (!a.defined() && !b.defined()) {
// undef + undef == undef
- stack->emplace_back(a);
+ stack.emplace_back(a);
} else if (!a.defined()) {
- stack->emplace_back(b);
+ stack.emplace_back(b);
} else if (!b.defined()) {
- stack->emplace_back(a);
+ stack.emplace_back(a);
} else {
- stack->emplace_back(a + b);
+ stack.emplace_back(a + b);
}
},
aliasAnalysisSpecialCase()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::_size_if_not_equal(int[] self_size, int[] other_size) -> int[]?"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue self_size, other_size;
pop(stack, self_size, other_size);
auto s = self_size.toIntVector();
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::_unwrap_optional(t(a)? optional) -> t(a)"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto val = pop(stack);
TORCH_CHECK(!val.isNone(), "Unwrapping null optional");
push(stack, std::move(val));
RegisterOperators reg1(
createOperators(opGenArgs1, sizeof(opGenArgs1) / sizeof(opGenArgs1[0])));
-void hashValue(Stack* stack) {
+// aten::hash: pops one IValue and pushes its IValue::hash(). Migrated to the
+// Stack& operator signature.
+void hashValue(Stack& stack) {
  auto value = pop(stack);
  push(stack, value.hash());
}
#define DEFINE_CONVERT_BASE_OP(op_name, prefix, char_op) \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA(#op_name "(int i) -> str"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
auto i = pop(stack).toInt(); \
std::stringstream ss; \
if (i < 0) { \
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::bin(int i) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto i = pop(stack).toInt();
std::stringstream ss;
if (i == 0) {
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"prim::StringIndex(str string, int index) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto index = pop(stack).toInt();
auto string = pop(stack).toStringRef();
auto norm_index = normalizeIndex(index, string.size());
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::chr(int i) -> str"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto i = pop(stack).toInt();
std::stringstream ss;
TORCH_CHECK(
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::modf(float a) -> (float, float)"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double a;
pop(stack, a);
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::frexp(float a) -> (float, int)"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double a;
pop(stack, a);
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::ldexp(float x, int i) -> float"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double a;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
float),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::abs(Tensor x) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor x;
pop(stack, x);
push(stack, x.abs());
float),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::_tensor_to_list(Tensor self) -> int[]"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor t;
pop(stack, t);
c10::List<int64_t> elems;
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::_list_to_tensor(int[] self) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<int64_t> l = pop(stack).toIntList();
auto t = torch::empty(
{static_cast<int64_t>(l.size())}, at::dtype(at::kInt));
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::sum.int(int[] self) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<int64_t> l = pop(stack).toIntList();
auto sum = 0;
for (const auto& elem : l) {
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::sum.float(float[] self) -> float"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<double> l = pop(stack).toDoubleList();
auto sum = 0.0;
for (const auto& elem : l) {
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::sum.complex(complex[] self) -> complex"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<c10::complex<double>> l = pop(stack).toComplexDoubleList();
c10::complex<double> sum = 0.0;
for (const auto i : c10::irange(l.size())) {
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::sum.bool(bool[] self) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<bool> l = pop(stack).toBoolList();
auto sum = 0;
for (const auto& elem : l) {
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::any.str(str[] self) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto l = pop(stack).toList();
for (const auto& elem : l) {
if (elem != "") {
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::any.int(int[] self) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<int64_t> l = pop(stack).toIntList();
for (const auto& elem : l) {
if (elem) {
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::any.float(float[] self) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<double> l = pop(stack).toDoubleList();
for (const auto& elem : l) {
if (elem) {
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::any.bool(bool[] self) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<bool> l = pop(stack).toBoolList();
for (const auto& elem : l) {
if (elem) {
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::all.int(int[] self) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<int64_t> l = pop(stack).toIntList();
for (const auto& elem : l) {
if (!elem) {
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::all.float(float[] self) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<double> l = pop(stack).toDoubleList();
for (const auto& elem : l) {
if (!elem) {
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::all.bool(bool[] self) -> bool"),
- [](Stack* stack) {
+ [](Stack& stack) {
c10::List<bool> l = pop(stack).toBoolList();
for (const auto& elem : l) {
if (!elem) {
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("aten::divmod.int(int x, int y) -> (int, int)"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t a, b;
lldiv_t divresult = {};
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA(
"aten::divmod.float(float x, float y) -> (float, float)"),
- [](Stack* stack) {
+ [](Stack& stack) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double a, b;
pop(stack, a, b);
aliasAnalysisFromSchema()),
OperatorGeneratorArgs(
TORCH_SELECTIVE_SCHEMA("prim::id(AnyClassType? x) -> int"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue a;
pop(stack, a);
if (a.isNone()) {
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA("aten::divmod." #type_a "_" #type_b "(" #type_a \
" x," #type_b " y) -> (float, float)"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
type_a a; \
type_b b; \
pop(stack, a, b); \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA("aten::Complex." #type_a "_" #type_b "(" #type_a \
" x," #type_b " y) -> complex"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
actual_type_a a; \
actual_type_b b; \
pop(stack, a, b); \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA("aten::Complex." #type_a "_" #type_b "(" #type_a \
" x," #type_b " y) -> complex"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
actual_type_a a; \
actual_type_b b; \
pop(stack, a, b); \
OperatorGeneratorArgs( \
TORCH_SELECTIVE_SCHEMA("aten::Complex." #type_b "_" #type_a \
"(" #type_b " x," #type_a " y) -> complex"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
actual_type_b a; \
actual_type_a b; \
pop(stack, a, b); \
{Operator(
prim::profile,
[](const Node* node) -> Operation {
- return [](Stack* stack) {
+ return [](Stack& stack) {
AT_ERROR(
"Must be lowered to Interpreter's PROFILE instruction"); // NOLINT
};
Operator(
prim::profile_ivalue,
[](const Node* node) -> Operation {
- return [](Stack* stack) {
+ return [](Stack& stack) {
AT_ERROR(
"Must be lowered to Interpreter's PROFILE instruction"); // NOLINT
};
prim::FusionGroup,
[](const Node* node) -> Operation {
const auto key = registerFusion(node);
- return [key](Stack* stack) {
+ return [key](Stack& stack) {
RECORD_FUNCTION("FusionGroup", std::vector<c10::IValue>());
- runFusion(key, *stack);
+ runFusion(key, stack);
};
},
aliasAnalysisSpecialCase()),
t->castRaw<TensorType>()->requiresGrad().has_value());
return *t->castRaw<TensorType>()->requiresGrad();
});
- return [rg_props](Stack* stack) {
+ return [rg_props](Stack& stack) {
auto num_inputs = rg_props.size();
// Check every input's shape against profiled (expected) shape.
for (const auto i : c10::irange(num_inputs)) {
auto outputs_used = fmap(node->outputs(), [](const Value* v) {
return v->uses().size() > 0;
});
- return [=](Stack* stack) {
+ return [=](Stack& stack) {
RECORD_FUNCTION("chunk", last(stack, 1));
at::Tensor t;
pop(stack, t);
auto result = at::chunk(t, chunks, dim);
- stack->insert(
- stack->end(),
+ stack.insert(
+ stack.end(),
std::make_move_iterator(result.begin()),
std::make_move_iterator(result.end()));
// NB: Chunk can sometimes return a smaller number of outputs.
num_results);
// We know that the output is unused, so it's ok to push
// anything on the stack.
- stack->emplace_back();
+ stack.emplace_back();
}
}
};
[](const Node* node) -> Operation {
int64_t raw_dim = node->i(attr::dim);
int64_t chunks = node->i(attr::chunks);
- return [raw_dim, chunks](Stack* stack) {
+ return [raw_dim, chunks](Stack& stack) {
c10::List<int64_t> shape = pop(stack).toIntList();
c10::List<int64_t> regular_shape = shape.copy();
c10::List<int64_t> last_shape = shape.copy();
aliasAnalysisSpecialCase()),
Operator(
"aten::_grad_sum_to_size(Tensor(a) self, int[]? size) -> Tensor(a)",
- [](Stack* stack) {
+ [](Stack& stack) {
RECORD_FUNCTION("_grad_sum_to_size", std::vector<c10::IValue>());
IValue self, size;
pop(stack, self, size);
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA(
"prim::ModuleContainerIndex.list(Any self, int ind) -> Any"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue ind = pop(stack);
IValue module_dict = pop(stack);
std::stringstream ss;
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA(
"prim::ModuleContainerIndex.dict(Any self, str ind) -> Any"),
- [](Stack* stack) {
+ [](Stack& stack) {
IValue ind = pop(stack);
IValue module_dict = pop(stack);
push(stack, module_dict.toModule().attr(ind.toStringRef()));
Operator(
prim::TypeCheck /* (...) -> (..., bool) */,
[](const Node* /* node */) -> Operation {
- return [](Stack* /* stack */) {
+ return [](Stack& /* stack */) {
AT_ERROR("prim::TypeCheck not yet implemented"); // NOLINT
};
},
Operator(
prim::FallbackGraph,
[](const Node* node) -> Operation {
- return [](Stack* stack) {
+ return [](Stack& stack) {
AT_ERROR(
"Must be converted to prim::FunctionCall by replaceFallbackGraphWithFallbackFunction"); // NOLINT
};
aliasAnalysisSpecialCase()),
Operator(
"prim::Guard(Tensor(a) t) -> Tensor(a)",
- [](Stack* stack) { AT_ERROR("Should be replaced by prim::BailOut"); },
+ [](Stack& stack) { AT_ERROR("Should be replaced by prim::BailOut"); },
aliasAnalysisFromSchema()),
Operator(
"prim::BailOut(...) -> Tensor(a)",
- [](Stack* /* stack */) {
+ [](Stack& /* stack */) {
AT_ERROR("prim::BailOut not yet implemented"); // NOLINT
},
aliasAnalysisFromSchema()),
Operator(
"prim::BailoutTemplate() -> int",
- [](Stack* stack) {
+ [](Stack& stack) {
// TODO: today, we put a single bailout template at the front to
// carry the un-optimized graph for bailout nodes to use. Ideally
// this should never run, but we haven't written the code to remove
aliasAnalysisFromSchema()),
Operator(
"aten::grad(Tensor[] outputs, Tensor[] inputs, Tensor?[]? grad_outputs=None, bool? retain_graph=None, bool create_graph=False, bool allow_unused=False) -> Tensor?[]",
- [](Stack* stack) {
+ [](Stack& stack) {
bool allow_unused = pop(stack).toBool();
bool create_graph = pop(stack).toBool();
auto retain_graph = pop(stack).toOptional<bool>();
// create_graph=True so we use aliasAnalysisConservative for these two OPs
Operator(
"aten::backward.TensorList(Tensor[] tensors, Tensor?[]? grad_tensors=None, bool? retain_graph=None, bool create_graph=False) -> ()",
- [](Stack* stack) {
+ [](Stack& stack) {
bool create_graph = pop(stack).toBool();
auto retain_graph = pop(stack).toOptional<bool>();
auto grad_tensors = pop(stack);
aliasAnalysisConservative()),
Operator(
"aten::save(t item, str filename) -> ()",
- [](Stack* stack) {
+ [](Stack& stack) {
auto filename = pop(stack).toStringRef();
auto ivalue = pop(stack);
aliasAnalysisFromSchema()),
Operator(
"prim::IgnoredPythonOp(...) -> None",
- [](Stack* stack) {
+ [](Stack& stack) {
throw JITException(
"This Python function is annotated to be ignored"
" and cannot be and has not been included in the exported"
aliasAnalysisFromSchema()),
Operator(
"aten::wait(Future(t) self) -> t",
- [](Stack* stack) {
+ [](Stack& stack) {
TORCH_CHECK(
false, "wait is implemented directly in the interpreter");
},
RegisterOperators logging_operators(
{Operator(
"prim::AddStatValue(str key, int val) -> ()",
- [](Stack* stack) {
+ [](Stack& stack) {
auto val = pop(stack).toInt();
auto key = pop(stack).toString();
aliasAnalysisFromSchema()),
Operator(
"prim::TimePoint() -> int",
- [](Stack* stack) {
+ [](Stack& stack) {
auto schema = parseSchema("prim::TimePoint() -> int");
Node* node = nullptr;
// TODO: remove this custom tracing code once the custom op bugfix
},
aliasAnalysisFromSchema())});
-C10_UNUSED void hashValue(Stack* stack) {
+// Pops one IValue and pushes its hash, same as the earlier hashValue helper.
+// NOTE(review): C10_UNUSED suggests this copy may be unreferenced in some
+// build configurations — confirm whether this duplicate definition is still
+// needed or can be consolidated with the other hashValue.
+C10_UNUSED void hashValue(Stack& stack) {
  auto value = pop(stack);
  push(stack, value.hash());
}
}
template <bool has_reverse_arg, bool copy_return_list>
-void sort_op(Stack* stack) {
+void sort_op(Stack& stack) {
bool reverse = has_reverse_arg ? pop(stack).toBool() : false;
auto g_list = pop(stack).toList();
") ");
}
-void interpolate_op(Stack* stack) {
+void interpolate_op(Stack& stack) {
at::Tensor input;
IValue size;
IValue scale_factors;
return scale_factor_double;
}
-void upsample_nearest_op(Stack* stack) {
+void upsample_nearest_op(Stack& stack) {
at::Tensor input;
IValue size;
IValue scale_factor_int;
push(stack, std::move(res));
}
-void upsample_op(Stack* stack) {
+void upsample_op(Stack& stack) {
at::Tensor input;
IValue size;
IValue scale_factor_int;
push(stack, std::move(res));
}
-void upsample_bilinear_op(Stack* stack) {
+void upsample_bilinear_op(Stack& stack) {
at::Tensor input;
IValue size;
IValue scale_factor_int;
}
template <bool if_set_requires_grad>
-void createTensorFromList(Stack* stack) {
+void createTensorFromList(Stack& stack) {
// torch.tensor has a fourth requires_grad arg but torch.as_tensor not, so
// we use the template arg to distinguish between these two cases
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA(
"aten::split(Tensor self, int[] split_sizes, int dim=0) -> Tensor[]"),
- [](Stack* stack) {
+ [](Stack& stack) {
RECORD_FUNCTION("split_with_sizes", last(stack, 3));
auto result = at::split_with_sizes(
"aten::tensor." #operator_type "(" #operator_type \
" t, *, ScalarType? dtype=None, Device? device=None" \
", bool requires_grad=False) -> Tensor"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
c_type scalar_val; \
IValue dtype; \
IValue device; \
TORCH_SELECTIVE_SCHEMA( \
"aten::as_tensor." #operator_type "(" #operator_type \
" t, *, ScalarType? dtype=None, Device? device=None) -> Tensor"), \
- [](Stack* stack) { \
+ [](Stack& stack) { \
c_type scalar_val; \
IValue dtype; \
IValue device; \
// tensor_new.cpp
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA("aten::_infer_size(int[] a, int[] b) -> int[]"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto a = pop(stack);
auto b = pop(stack);
push(stack, at::infer_size(a.toIntVector(), b.toIntVector()));
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA(
"aten::_no_grad_embedding_renorm_(Tensor weight, Tensor input, float max_norm, float norm_type) -> Tensor"),
- [](Stack* stack) {
+ [](Stack& stack) {
at::Tensor weight;
at::Tensor input;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA(
"aten::as_tensor(Tensor(a) data, *, ScalarType? dtype=None, Device? device=None) -> Tensor(a|b)"),
- [](Stack* stack) {
+ [](Stack& stack) {
auto device = pop(stack).toOptional<c10::Device>();
auto dtype = pop(stack).toOptional<at::ScalarType>();
at::Tensor data = pop(stack).toTensor();
TORCH_SELECTIVE_SCHEMA(
"aten::_pack_sequence(Tensor output, Tensor batch_sizes, Tensor? sorted_indices, "
"Tensor? unsorted_indices) -> (Tensor, Tensor, Tensor?, Tensor?)"),
- [](Stack* stack) {},
+ [](Stack& stack) {},
aliasAnalysisFromSchema()),
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA("aten::_get_tracing_state() -> bool"),
- [](Stack* stack) { push(stack, false); },
+ [](Stack& stack) { push(stack, false); },
aliasAnalysisFromSchema()),
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA("aten::is_scripting() -> bool"),
- [](Stack* stack) { push(stack, true); },
+ [](Stack& stack) { push(stack, true); },
aliasAnalysisFromSchema()),
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA("aten::has_torch_function(...) -> bool"),
- [](Stack* stack) { push(stack, false); },
+ [](Stack& stack) { push(stack, false); },
aliasAnalysisFromSchema()),
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA(
"aten::_no_grad_uniform_(Tensor(a!) tensor, float a, float b) -> Tensor(a!)"),
- [](Stack* stack) {
+ [](Stack& stack) {
// TODO: remove when script supports setting grad mode
torch::NoGradGuard no_grad;
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA(
"aten::_no_grad_normal_(Tensor(a!) tensor, float mean, float std) -> Tensor(a!)"),
- [](Stack* stack) {
+ [](Stack& stack) {
// TODO: remove when script supports setting grad mode
torch::NoGradGuard no_grad;
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA(
"aten::_no_grad_fill_(Tensor(a!) tensor, float val) -> Tensor(a!)"),
- [](Stack* stack) {
+ [](Stack& stack) {
// TODO: remove when script supports setting grad mode
torch::NoGradGuard no_grad;
OperatorGenerator(
TORCH_SELECTIVE_SCHEMA(
"aten::_no_grad_zero_(Tensor(a!) tensor) -> Tensor(a!)"),
- [](Stack* stack) {
+ [](Stack& stack) {
// TODO: remove when script supports setting grad mode
torch::NoGradGuard no_grad;
aliasAnalysisFromSchema()),
Operator(
"aten::is_grad_enabled() -> bool",
- [](Stack* stack) { push(stack, torch::GradMode::is_enabled()); },
+ [](Stack& stack) { push(stack, torch::GradMode::is_enabled()); },
aliasAnalysisConservative()),
Operator(
"aten::set_grad_enabled(bool val) -> ()",
- [](Stack* stack) { torch::GradMode::set_enabled(pop(stack).toBool()); },
+ [](Stack& stack) { torch::GradMode::set_enabled(pop(stack).toBool()); },
aliasAnalysisConservative()),
});
} // namespace
auto g = node->g(attr::Subgraph);
auto module = std::make_shared<torch::jit::StaticModule>(g);
auto num_inputs = module->num_inputs();
- return [module, num_inputs](Stack* stack) {
+ return [module, num_inputs](Stack& stack) {
RECORD_FUNCTION("Static Runtime", std::vector<c10::IValue>());
auto inps = torch::jit::last(stack, num_inputs);
// TODO maybe avoid call to vec
if (module->num_outputs() > 1) {
for (auto& o : outputs.toTuple()->elements()) {
- push_one(*stack, std::move(o));
+ push_one(stack, std::move(o));
}
} else {
- push_one(*stack, std::move(outputs));
+ push_one(stack, std::move(outputs));
}
return 0;
};
}
DCHECK(op_);
- op_->operator()(&stack);
+ op_->operator()(stack);
DCHECK_EQ(stack.size(), node_->outputs().size());
for (const auto i : c10::irange(node_->outputs().size())) {