# What folders / files Doxygen should process.
INPUT = ../../../aten/src/ATen/ATen.h \
../../../aten/src/ATen/Backend.h \
+ ../../../aten/src/ATen/core/ivalue.h \
../../../aten/src/ATen/core/ScalarType.h \
../../../aten/src/ATen/core/Tensor.h \
../../../aten/src/ATen/cuda/CUDAContext.h \
../../../torch/csrc/autograd/generated/variable_factories.h \
../../../torch/csrc/jit/custom_operator.h \
../../../torch/csrc/jit/import.h \
- ../../../torch/csrc/jit/ivalue.h \
../../../torch/csrc/jit/script/module.h
# Don't include .cpp files!
FILE_PATTERNS = *.h
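For reference, Doxygen extracts documentation from comment blocks in these headers; a minimal, hypothetical illustration of the triple-slash style it parses (this function is not part of the change):

/// Computes the elementwise sum of two tensors.
/// \param a The first operand.
/// \param b The second operand, broadcast against `a`.
/// \returns A new tensor holding `a + b`.
at::Tensor add_example(const at::Tensor& a, const at::Tensor& b);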
#include "torch/csrc/autograd/generated/variable_factories.h"
#include "torch/csrc/autograd/variable.h"
#include "torch/csrc/jit/argument_spec.h"
-#include "torch/csrc/jit/assertions.h"
+#include "c10/util/Exception.h"
#include "torch/csrc/jit/attributes.h"
#include "torch/csrc/jit/autodiff.h"
#include "torch/csrc/jit/code_template.h"
#include "torch/csrc/jit/custom_operator.h"
#include "torch/csrc/jit/dynamic_dag.h"
#include "torch/csrc/jit/fuser/interface.h"
-#include "torch/csrc/jit/interned_strings.h"
+#include "ATen/core/interned_strings.h"
#include "torch/csrc/jit/interpreter.h"
#include "torch/csrc/jit/ir.h"
#include "torch/csrc/jit/operator.h"
#include "torch/csrc/autograd/variable.h"
#include "torch/csrc/jit/graph_executor.h"
-#include "torch/csrc/jit/ivalue.h"
+#include "ATen/core/ivalue.h"
#include "torch/csrc/jit/script/compiler.h"
#include "torch/csrc/jit/script/module.h"
auto maybe_fusion_group = std::find_if(nodes.begin(), nodes.end(), [](const Node* node) {
return node->kind() == prim::FusionGroup;
});
- JIT_ASSERTM(
+ AT_CHECK(
maybe_fusion_group != nodes.end(),
"testRegisterFusionCachesKernel: could not create FusionGroup");
return *maybe_fusion_group;
void testNewVertex() {
auto graph = newDynamicDAG();
- JIT_ASSERT(graph->debugNumVertices() == 0);
+ AT_ASSERT(graph->debugNumVertices() == 0);
auto a = graph->newVertex("a");
- JIT_ASSERT(graph->debugNumVertices() == 1);
- JIT_ASSERT(a->ord == 0);
- JIT_ASSERT(a->data.size() == 1);
- JIT_ASSERT(a->data[0] == "a");
- JIT_ASSERT(a->in_edges().size() == 0);
- JIT_ASSERT(a->out_edges().size() == 0);
+ AT_ASSERT(graph->debugNumVertices() == 1);
+ AT_ASSERT(a->ord == 0);
+ AT_ASSERT(a->data.size() == 1);
+ AT_ASSERT(a->data[0] == "a");
+ AT_ASSERT(a->in_edges().size() == 0);
+ AT_ASSERT(a->out_edges().size() == 0);
auto b = graph->newVertex("b");
auto c = graph->newVertex("c");
- JIT_ASSERT(graph->debugNumVertices() == 3);
- JIT_ASSERT(b->ord == 1);
- JIT_ASSERT(c->ord == 2);
+ AT_ASSERT(graph->debugNumVertices() == 3);
+ AT_ASSERT(b->ord == 1);
+ AT_ASSERT(c->ord == 2);
}
void testAddEdgeBasic() {
graph->addEdge(a, b);
graph->addEdge(b, c);
graph->addEdge(a, c);
- JIT_ASSERT(a->in_edges().size() == 0);
- JIT_ASSERT(a->out_edges().size() == 2);
- JIT_ASSERT(a->out_edges().contains(b));
- JIT_ASSERT(a->out_edges().contains(c));
- JIT_ASSERT(b->in_edges().size() == 1);
- JIT_ASSERT(b->out_edges().size() == 1);
- JIT_ASSERT(b->in_edges().contains(a));
- JIT_ASSERT(b->out_edges().contains(c));
- JIT_ASSERT(c->in_edges().size() == 2);
- JIT_ASSERT(c->out_edges().size() == 0);
- JIT_ASSERT(c->in_edges().contains(a));
- JIT_ASSERT(c->in_edges().contains(b));
+ AT_ASSERT(a->in_edges().size() == 0);
+ AT_ASSERT(a->out_edges().size() == 2);
+ AT_ASSERT(a->out_edges().contains(b));
+ AT_ASSERT(a->out_edges().contains(c));
+ AT_ASSERT(b->in_edges().size() == 1);
+ AT_ASSERT(b->out_edges().size() == 1);
+ AT_ASSERT(b->in_edges().contains(a));
+ AT_ASSERT(b->out_edges().contains(c));
+ AT_ASSERT(c->in_edges().size() == 2);
+ AT_ASSERT(c->out_edges().size() == 0);
+ AT_ASSERT(c->in_edges().contains(a));
+ AT_ASSERT(c->in_edges().contains(b));
}
void testAddEdgeCycleDetection() {
} catch (c10::Error& err) {
erred = true;
}
- JIT_ASSERT(erred);
+ AT_ASSERT(erred);
}
void testAddEdgeReordersBasic() {
auto graph = newDynamicDAG();
auto a = graph->newVertex("a");
auto b = graph->newVertex("b");
- JIT_ASSERT(a->ord == 0);
- JIT_ASSERT(b->ord == 1);
+ AT_ASSERT(a->ord == 0);
+ AT_ASSERT(b->ord == 1);
graph->addEdge(b, a);
- JIT_ASSERT(a->ord == 1);
- JIT_ASSERT(b->ord == 0);
+ AT_ASSERT(a->ord == 1);
+ AT_ASSERT(b->ord == 0);
}
void testAddEdgeReordersComplicated() {
auto d = graph->newVertex("d");
graph->addEdge(a, b);
graph->addEdge(c, d);
- JIT_ASSERT(a->ord == 0);
- JIT_ASSERT(b->ord == 1);
- JIT_ASSERT(c->ord == 2);
- JIT_ASSERT(d->ord == 3);
+ AT_ASSERT(a->ord == 0);
+ AT_ASSERT(b->ord == 1);
+ AT_ASSERT(c->ord == 2);
+ AT_ASSERT(d->ord == 3);
graph->addEdge(d, a);
- JIT_ASSERT(c->ord == 0);
- JIT_ASSERT(d->ord == 1);
- JIT_ASSERT(a->ord == 2);
- JIT_ASSERT(b->ord == 3);
- JIT_ASSERT(c->in_edges().size() == 0);
- JIT_ASSERT(c->out_edges().size() == 1);
- JIT_ASSERT(c->out_edges().contains(d));
- JIT_ASSERT(d->in_edges().size() == 1);
- JIT_ASSERT(d->out_edges().size() == 1);
- JIT_ASSERT(d->in_edges().contains(c));
- JIT_ASSERT(d->out_edges().contains(a));
- JIT_ASSERT(a->in_edges().size() == 1);
- JIT_ASSERT(a->out_edges().size() == 1);
- JIT_ASSERT(a->in_edges().contains(d));
- JIT_ASSERT(a->out_edges().contains(b));
- JIT_ASSERT(b->in_edges().size() == 1);
- JIT_ASSERT(b->out_edges().size() == 0);
- JIT_ASSERT(b->in_edges().contains(a));
+ AT_ASSERT(c->ord == 0);
+ AT_ASSERT(d->ord == 1);
+ AT_ASSERT(a->ord == 2);
+ AT_ASSERT(b->ord == 3);
+ AT_ASSERT(c->in_edges().size() == 0);
+ AT_ASSERT(c->out_edges().size() == 1);
+ AT_ASSERT(c->out_edges().contains(d));
+ AT_ASSERT(d->in_edges().size() == 1);
+ AT_ASSERT(d->out_edges().size() == 1);
+ AT_ASSERT(d->in_edges().contains(c));
+ AT_ASSERT(d->out_edges().contains(a));
+ AT_ASSERT(a->in_edges().size() == 1);
+ AT_ASSERT(a->out_edges().size() == 1);
+ AT_ASSERT(a->in_edges().contains(d));
+ AT_ASSERT(a->out_edges().contains(b));
+ AT_ASSERT(b->in_edges().size() == 1);
+ AT_ASSERT(b->out_edges().size() == 0);
+ AT_ASSERT(b->in_edges().contains(a));
}
void testRemoveEdgeBasic() {
auto a = graph->newVertex("a");
auto b = graph->newVertex("b");
graph->addEdge(a, b);
- JIT_ASSERT(graph->debugNumVertices() == 2);
+ AT_ASSERT(graph->debugNumVertices() == 2);
graph->removeEdge(a, b);
- JIT_ASSERT(graph->debugNumVertices() == 2);
- JIT_ASSERT(a->out_edges().size() == 0);
- JIT_ASSERT(b->in_edges().size() == 0);
+ AT_ASSERT(graph->debugNumVertices() == 2);
+ AT_ASSERT(a->out_edges().size() == 0);
+ AT_ASSERT(b->in_edges().size() == 0);
}
void testRemoveVertexBasic() {
auto c = graph->newVertex("c");
graph->addEdge(a, b);
graph->addEdge(b, c);
- JIT_ASSERT(graph->debugNumVertices() == 3);
+ AT_ASSERT(graph->debugNumVertices() == 3);
graph->removeVertex(b);
- JIT_ASSERT(graph->debugNumVertices() == 2);
- JIT_ASSERT(a->out_edges().size() == 0);
- JIT_ASSERT(c->in_edges().size() == 0);
+ AT_ASSERT(graph->debugNumVertices() == 2);
+ AT_ASSERT(a->out_edges().size() == 0);
+ AT_ASSERT(c->in_edges().size() == 0);
}
void testContractEdgeBasic() {
graph->addEdge(b, c);
graph->addEdge(c, d);
graph->contractEdge(b, c);
- JIT_ASSERT(graph->debugNumVertices() == 3);
- JIT_ASSERT(a->out_edges().size() == 1);
- JIT_ASSERT(d->in_edges().size() == 1);
- JIT_ASSERT(*a->out_edges().begin() == *d->in_edges().begin());
+ AT_ASSERT(graph->debugNumVertices() == 3);
+ AT_ASSERT(a->out_edges().size() == 1);
+ AT_ASSERT(d->in_edges().size() == 1);
+ AT_ASSERT(*a->out_edges().begin() == *d->in_edges().begin());
auto* contracted = *a->out_edges().begin();
- JIT_ASSERT(contracted->data.size() == 2);
- JIT_ASSERT(contracted->data[0] == "b");
- JIT_ASSERT(contracted->data[1] == "c");
- JIT_ASSERT(contracted->out_edges().size() == 1);
- JIT_ASSERT(contracted->in_edges().size() == 1);
- JIT_ASSERT(contracted->in_edges().contains(a));
- JIT_ASSERT(contracted->out_edges().contains(d));
+ AT_ASSERT(contracted->data.size() == 2);
+ AT_ASSERT(contracted->data[0] == "b");
+ AT_ASSERT(contracted->data[1] == "c");
+ AT_ASSERT(contracted->out_edges().size() == 1);
+ AT_ASSERT(contracted->in_edges().size() == 1);
+ AT_ASSERT(contracted->in_edges().contains(a));
+ AT_ASSERT(contracted->out_edges().contains(d));
}
void testContractEdgeCycleDetection() {
graph->addEdge(a, b);
graph->addEdge(b, c);
graph->addEdge(a, c);
- JIT_ASSERT(!graph->contractEdge(a, c));
+ AT_ASSERT(!graph->contractEdge(a, c));
}
void testDynamicDAG() {
curNode = original;
size_t idx = 0;
while (curNode != n->owningBlock()->return_node()) {
- JIT_ASSERT(originalOrdering[idx] == curNode);
+ AT_ASSERT(originalOrdering[idx] == curNode);
if (isAfter) {
curNode = curNode->next();
} else {
const std::string& insertPoint,
bool after) {
if (after) {
- JIT_ASSERT(nodes.at(toInsert)->prev() == nodes.at(insertPoint));
+ AT_ASSERT(nodes.at(toInsert)->prev() == nodes.at(insertPoint));
} else {
- JIT_ASSERT(nodes.at(toInsert)->next() == nodes.at(insertPoint));
+ AT_ASSERT(nodes.at(toInsert)->next() == nodes.at(insertPoint));
}
}
// Check that we are removing `this`'s deps properly when we need to split
// `this` and deps (see code for what the hell that means)
TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveBeforeTopologicallyValid("q", "s"));
+ AT_ASSERT(fixture.moveBeforeTopologicallyValid("q", "s"));
fixture.checkPostCondition("q", "s", false);
}
// Move after
{
// Simple move backward
TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveAfterTopologicallyValid("c", "a"));
+ AT_ASSERT(fixture.moveAfterTopologicallyValid("c", "a"));
fixture.checkPostCondition("c", "a", true);
}
{
// simple invalid move backward
TopoMoveTestFixture fixture;
- JIT_ASSERT(!fixture.moveAfterTopologicallyValid("d", "a"));
+ AT_ASSERT(!fixture.moveAfterTopologicallyValid("d", "a"));
}
{
// doesn't actually move anything
TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveAfterTopologicallyValid("f", "e"));
+ AT_ASSERT(fixture.moveAfterTopologicallyValid("f", "e"));
fixture.checkPostCondition("f", "e", true);
}
{
// move backward with multiple dependencies
TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveAfterTopologicallyValid("e", "c"));
+ AT_ASSERT(fixture.moveAfterTopologicallyValid("e", "c"));
fixture.checkPostCondition("e", "c", true);
}
{
// Move backward with non-zero working set
TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveAfterTopologicallyValid("k", "f"));
+ AT_ASSERT(fixture.moveAfterTopologicallyValid("k", "f"));
fixture.checkPostCondition("k", "f", true);
}
{
// Simple move forward
TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveAfterTopologicallyValid("c", "d"));
+ AT_ASSERT(fixture.moveAfterTopologicallyValid("c", "d"));
fixture.checkPostCondition("c", "d", true);
}
{
// Move forward with non-zero working set
TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveAfterTopologicallyValid("f", "l"));
+ AT_ASSERT(fixture.moveAfterTopologicallyValid("f", "l"));
fixture.checkPostCondition("f", "l", true);
}
{
// Simple move forward
TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveBeforeTopologicallyValid("b", "d"));
+ AT_ASSERT(fixture.moveBeforeTopologicallyValid("b", "d"));
fixture.checkPostCondition("b", "d", false);
}
{
// Simple move backward
TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveBeforeTopologicallyValid("c", "a"));
+ AT_ASSERT(fixture.moveBeforeTopologicallyValid("c", "a"));
fixture.checkPostCondition("c", "a", false);
}
{
// doesn't actually move anything
TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveBeforeTopologicallyValid("a", "b"));
+ AT_ASSERT(fixture.moveBeforeTopologicallyValid("a", "b"));
fixture.checkPostCondition("a", "b", false);
}
{
// move forward with deps
TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveBeforeTopologicallyValid("f", "m"));
+ AT_ASSERT(fixture.moveBeforeTopologicallyValid("f", "m"));
fixture.checkPostCondition("f", "m", false);
}
{
// move backward with deps
TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveBeforeTopologicallyValid("l", "f"));
+ AT_ASSERT(fixture.moveBeforeTopologicallyValid("l", "f"));
fixture.checkPostCondition("l", "f", false);
}
// check that dependencies in blocks are recognized
{
TopoMoveTestFixture fixture;
- JIT_ASSERT(!fixture.moveAfterTopologicallyValid("l", "m"));
- JIT_ASSERT(!fixture.moveBeforeTopologicallyValid("m", "l"));
- JIT_ASSERT(!fixture.moveAfterTopologicallyValid("n", "l"));
- JIT_ASSERT(!fixture.moveBeforeTopologicallyValid("l", "n"));
+ AT_ASSERT(!fixture.moveAfterTopologicallyValid("l", "m"));
+ AT_ASSERT(!fixture.moveBeforeTopologicallyValid("m", "l"));
+ AT_ASSERT(!fixture.moveAfterTopologicallyValid("n", "l"));
+ AT_ASSERT(!fixture.moveBeforeTopologicallyValid("l", "n"));
}
// Test that moveAfter(n) and moveBefore(n->next()) are not necessarily
// equivalent. Here the dependency ordering is n -> o -> p, so we can't move
// `n` after `o`, but we can still move `o` before `p` (leaving `o` directly
// before `p`)
{
TopoMoveTestFixture fixture;
- JIT_ASSERT(!fixture.moveAfterTopologicallyValid("n", "o"));
- JIT_ASSERT(fixture.moveBeforeTopologicallyValid("o", "p"));
+ AT_ASSERT(!fixture.moveAfterTopologicallyValid("n", "o"));
+ AT_ASSERT(fixture.moveBeforeTopologicallyValid("o", "p"));
fixture.checkPostCondition("o", "p", false);
}
}
auto aliasDb = AliasAnalysis(graph);
// Can't move past a mutation of a used value
- JIT_ASSERT(!aliasDb.moveAfterTopologicallyValid(c->node(), aMut->node()));
- JIT_ASSERT(aliasDb.moveAfterTopologicallyValid(d->node(), c->node()));
+ AT_ASSERT(!aliasDb.moveAfterTopologicallyValid(c->node(), aMut->node()));
+ AT_ASSERT(aliasDb.moveAfterTopologicallyValid(d->node(), c->node()));
// b should alias to a (since they are both inputs)
- JIT_ASSERT(
+ AT_ASSERT(
!aliasDb.moveAfterTopologicallyValid(addsB->node(), aMut->node()));
- JIT_ASSERT(aliasDb.moveAfterTopologicallyValid(addsB->node(), c->node()));
+ AT_ASSERT(aliasDb.moveAfterTopologicallyValid(addsB->node(), c->node()));
graph->lint();
}
graph->lint();
auto aliasDb = AliasAnalysis(graph);
- JIT_ASSERT(!aliasDb.moveAfterTopologicallyValid(
+ AT_ASSERT(!aliasDb.moveAfterTopologicallyValid(
aliasesB->node(), mutatesAliasOfB->node()));
- JIT_ASSERT(!aliasDb.moveAfterTopologicallyValid(
+ AT_ASSERT(!aliasDb.moveAfterTopologicallyValid(
usesB->node(), mutatesAliasOfB->node()));
}
}
#include "torch/csrc/jit/custom_operator.h"
#include "torch/csrc/autograd/profiler.h"
-#include "torch/csrc/jit/interned_strings.h"
#include "torch/csrc/utils/functional.h"
#include "torch/csrc/autograd/generated/variable_factories.h"
#include <ATen/ATen.h>
+#include <ATen/core/interned_strings.h>
#include <algorithm>
#include <array>
template<size_t N>
std::array<bool, N> as_bool_array(const std::vector<bool>& vec) {
std::array<bool, N> res;
- JIT_ASSERT(vec.size() == N);
+ AT_ASSERT(vec.size() == N);
std::copy(vec.begin(), vec.end(), res.begin());
return res;
}
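A quick usage note: the assertion ties the runtime vector length to the compile-time array size, e.g.

auto mask = as_bool_array<3>({true, false, true}); // std::array<bool, 3>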
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/jit/ir.h>
#include <torch/csrc/jit/stack.h>
-#include <torch/csrc/jit/type.h>
+#include <ATen/core/jit_type.h>
#include <torch/csrc/jit/variable_tensor_list.h>
#include <torch/csrc/utils/hash.h>
#include <iostream>
for (const auto& i : inputs) {
addInput(i, offset, with_grad);
}
- JIT_ASSERT(offset == num_flat_inputs);
+ AT_ASSERT(offset == num_flat_inputs);
}
void addInput(const IValue& input, size_t& offset, bool with_grad) {
+++ /dev/null
-#pragma once
-
-#include <c10/util/Exception.h>
-
-#define JIT_ASSERT AT_ASSERT
-#define JIT_ASSERTM AT_CHECK
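Reviewer note: the two replacement macros are not interchangeable, and AT_CHECK builds its message by stream-concatenating the trailing arguments rather than printf-style formatting. A minimal sketch (check_dim is a hypothetical function, not part of this change):

#include <c10/util/Exception.h>

void check_dim(int64_t dim, int64_t ndim) {
  // AT_ASSERT: internal invariant; a failure indicates a bug in the library.
  AT_ASSERT(ndim >= 0);
  // AT_CHECK: user-facing check; the arguments after the condition are
  // concatenated into the c10::Error message.
  AT_CHECK(
      dim >= 0 && dim < ndim,
      "dimension ", dim, " out of range for ", ndim, "-dimensional input");
}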
#include <string>
#include <vector>
-#include <torch/csrc/jit/interned_strings.h>
+#include <ATen/core/interned_strings.h>
+#include <c10/util/Exception.h>
namespace torch {
namespace jit {
+using ::c10::Symbol;
+
constexpr int max_tensor_display_size = 10;
enum class AttributeKind { f, fs, i, is, s, ss, t, ts, g, gs };
static inline const char* toString(AttributeKind kind) {
static const char* names[] = {
"f", "fs", "i", "is", "s", "ss", "t", "ts", "g", "gs"};
- JIT_ASSERT(size_t(kind) < sizeof(names) / sizeof(AttributeKind));
+ AT_ASSERT(size_t(kind) < sizeof(names) / sizeof(names[0]));
return names[int(kind)];
}
#include <torch/csrc/jit/passes/common_subexpression_elimination.h>
#include <torch/csrc/jit/passes/constant_pooling.h>
#include <torch/csrc/jit/passes/dead_code_elimination.h>
+#include <torch/csrc/jit/passes/lower_tuples.h>
+#include <torch/csrc/jit/script/compiler.h>
+#include <torch/csrc/jit/symbolic_script.h>
#include <torch/csrc/jit/symbolic_variable.h>
#include <torch/csrc/utils/functional.h>
-#include "torch/csrc/jit/passes/lower_tuples.h"
-#include "torch/csrc/jit/script/compiler.h"
-#include "torch/csrc/jit/symbolic_script.h"
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <algorithm>
#include <memory>
} else if (
node->matches(
"aten::avg_pool2d(Tensor self, int[] kernel_size, int[] stride, int[] padding, bool ceil_mode, bool count_include_pad) -> Tensor")) {
- JIT_ASSERT(grads.size() == 1);
+ AT_ASSERT(grads.size() == 1);
auto graph = node->owningGraph();
auto backward_value = graph->insert(
aten::avg_pool2d_backward,
} else if (
node->matches(
"aten::max_pool2d_with_indices(Tensor self, int[] kernel_size, int[] stride, int[] padding, int[] dilation, bool ceil_mode) -> (Tensor, Tensor)")) {
- JIT_ASSERT(grads.size() == 2);
+ AT_ASSERT(grads.size() == 2);
auto graph = node->owningGraph();
auto backward_value = graph->insert(
aten::max_pool2d_with_indices_backward,
Node* tuple_unpack_node =
graph->insertNode(graph->createTupleUnpack(backward_value));
auto tuple_outputs = tuple_unpack_node->outputs();
- JIT_ASSERT(tuple_outputs.size() == size_t(3));
+ AT_ASSERT(tuple_outputs.size() == size_t(3));
return {tuple_outputs[0],
tuple_outputs[1],
nullptr,
Node* tuple_unpack_node =
graph->insertNode(graph->createTupleUnpack(backward_value));
auto tuple_outputs = tuple_unpack_node->outputs();
- JIT_ASSERT(tuple_outputs.size() == size_t(3));
+ AT_ASSERT(tuple_outputs.size() == size_t(3));
return {tuple_outputs[0],
tuple_outputs[1],
tuple_outputs[2],
} else if (node->matches(
"aten::log_softmax(Tensor self, int dim) -> Tensor")) {
- JIT_ASSERT(grads.size() == 1);
+ AT_ASSERT(grads.size() == 1);
auto graph = node->owningGraph();
auto backward_value = graph->insert(
aten::_log_softmax_backward_data,
linearGradientForNode(node, fmap(node->outputs(), get_grad));
LowerSimpleTuples(reverse_block);
- JIT_ASSERT(grad_inputs.size() == node->inputs().size());
+ AT_ASSERT(grad_inputs.size() == node->inputs().size());
for (size_t i = 0, num_inputs = grad_inputs.size(); i < num_inputs; ++i) {
if (!inputs[i]->requires_grad())
continue;
Block* reverse_block = rev_info.reverse_block;
for (Node* top_node : reverse_block->nodes()) {
- JIT_ASSERT(
+ AT_ASSERT(
top_node->kind() == prim::GradOf ||
top_node->kind() == prim::AutogradAdd ||
top_node->kind() == prim::Undefined);
Gradient differentiate(std::shared_ptr<Graph>& graph) {
Gradient grad_desc;
// Take ownership of the graph
- JIT_ASSERTM(
+ AT_CHECK(
graph.use_count() == 1,
"differentiate will mutate and destroy the graph, so it requires "
"graph.use_count() == 1, but found %d",
#pragma once
#include <torch/csrc/WindowsTorchApiMacro.h>
-#include <torch/csrc/jit/ivalue.h>
+#include <ATen/core/ivalue.h>
#include <torch/csrc/jit/scope.h>
#include <torch/csrc/jit/source_range.h>
namespace torch {
namespace jit {
+using ::c10::IValue;
+
struct Graph;
struct Value;
#pragma once
-#include <torch/csrc/jit/function_schema.h>
#include <torch/csrc/jit/operator.h>
#include <torch/csrc/jit/stack.h>
#include <torch/csrc/jit/tracer.h>
#include <torch/csrc/utils/variadic.h>
+#include <ATen/core/function_schema.h>
#include <c10/util/Metaprogramming.h>
#include <c10/util/TypeList.h>
namespace torch {
namespace jit {
namespace detail {
+
+using ::c10::Argument;
+using ::c10::FunctionSchema;
+
/// Checks the static C++ type `T` for correctness to catch common error cases.
template <typename T>
void checkStaticTypes() {
}
void push_back(Vertex<T>* elt) {
- JIT_ASSERT(!elt->visited_);
+ AT_ASSERT(!elt->visited_);
elt->visited_ = true;
data_.push_back(elt);
}
template <typename T>
void DynamicDAG<T>::removeEdge(Vertex<T>* producer, Vertex<T>* consumer) {
- JIT_ASSERT(producer != consumer);
- JIT_ASSERT(producer->out_edges().contains(consumer));
- JIT_ASSERT(consumer->in_edges().contains(producer));
+ AT_ASSERT(producer != consumer);
+ AT_ASSERT(producer->out_edges().contains(consumer));
+ AT_ASSERT(consumer->in_edges().contains(producer));
producer->out_edges().erase(consumer);
consumer->in_edges().erase(producer);
}
*/
template <typename T>
void DynamicDAG<T>::addEdge(Vertex<T>* producer, Vertex<T>* consumer) {
- JIT_ASSERT(producer != consumer);
+ AT_ASSERT(producer != consumer);
// NB: DynamicDAG is a simple graph. If an edge exists already, don't do
// anything.
if (!is_distinct)
return;
is_distinct = consumer->in_edges().insert(producer);
- JIT_ASSERT(is_distinct);
+ AT_ASSERT(is_distinct);
if (producer->ord <= consumer->ord) {
// topological ordering is already consistent, no need to update.
// Search for vertices that can reach producer that have a now incorrect
// topological ordering
- JIT_ASSERT(!dfsSearch(
+ AT_ASSERT(!dfsSearch(
DFSDirection::backward,
producer,
consumer,
// |in_edges(consumer)|))
template <typename T>
bool DynamicDAG<T>::contractEdge(Vertex<T>* producer, Vertex<T>* consumer) {
- JIT_ASSERT(producer != consumer);
+ AT_ASSERT(producer != consumer);
if (contractionProducesCycle(producer, consumer)) {
return false;
}
#include <torch/csrc/jit/export.h>
#include <torch/csrc/onnx/onnx.h>
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/passes/dead_code_elimination.h>
#include <torch/csrc/jit/passes/python_print.h>
#include <torch/csrc/utils/functional.h>
onnx_torch::OperatorExportTypes::ONNX_ATEN_FALLBACK) {
WithInsertPoint guard(node);
auto* new_node = b->owningGraph()->insertNode(b->owningGraph()->create(
- Symbol(::torch::jit::onnx::ATen),
+ Symbol(::c10::onnx::ATen),
node->inputs(),
node->outputs().size()));
for (size_t i = 0; i < node->outputs().size(); ++i) {
onnx::GraphProto* graph_proto,
const Block* block,
const std::vector<at::Tensor>& initializers) {
- JIT_ASSERT(graph_proto != nullptr);
+ AT_ASSERT(graph_proto != nullptr);
std::string block_name = "torch-jit-export";
if (num_blocks_) {
block_name += std::to_string(num_blocks_);
EncodeIntermediateValueInfo(graph_proto, output);
}
if (is_raw_export) {
- JIT_ASSERT(!node->kind().is_onnx());
+ AT_ASSERT(!node->kind().is_onnx());
p_n->set_domain(node->kind().domainString());
} else if (operator_export_type_ == onnx_torch::OperatorExportTypes::ONNX) {
- JIT_ASSERT(node->kind().is_onnx());
+ AT_ASSERT(node->kind().is_onnx());
}
p_n->set_op_type(node->kind().toUnqualString());
for (auto attr_name : node->attributeNames()) {
EncodeBlock(graph, block, initializers);
}
}
- if (node->kind() == torch::jit::onnx::Loop) {
- JIT_ASSERT(node->blocks().size() == 1);
+ if (node->kind() == ::c10::onnx::Loop) {
+ AT_ASSERT(node->blocks().size() == 1);
auto body = p_n->add_attribute();
body->set_name("body");
auto g = body->mutable_g();
EncodeBlock(g, node->blocks()[0]);
}
- if (node->kind() == torch::jit::onnx::If) {
- JIT_ASSERT(node->blocks().size() == 2);
+ if (node->kind() == ::c10::onnx::If) {
+ AT_ASSERT(node->blocks().size() == 2);
auto true_branch = p_n->add_attribute();
true_branch->set_name("then_branch");
}
}
auto num_initializers = initializers.size();
- JIT_ASSERT(block->inputs().size() >= num_initializers);
+ AT_ASSERT(block->inputs().size() >= num_initializers);
size_t inputs_count = block->inputs().size() - num_initializers;
for (auto& tensor : initializers) {
// TODO: stop using positions to determine which initializers
const jit::Node* node,
const jit::Symbol name) {
auto attr = node_proto->add_attribute();
- JIT_ASSERT(name.is_attr());
+ AT_ASSERT(name.is_attr());
attr->set_name(name.toUnqualString());
switch (node->kindOf(name)) {
case AttributeKind::f:
if (defer_weight_export_ && external_ref) {
// For now, we use the name of the tensor as the external lookup name to
// avoid ONNX protobuf changes.
- JIT_ASSERT(external_ref.value() == tensor_proto->name());
- JIT_ASSERT(raw_data_export_map_.count(external_ref.value()) == 0);
+ AT_ASSERT(external_ref.value() == tensor_proto->name());
+ AT_ASSERT(raw_data_export_map_.count(external_ref.value()) == 0);
raw_data_export_map_[external_ref.value()] = t;
tensor_proto->set_raw_data("__EXTERNAL");
} else {
- JIT_ASSERT(t.is_contiguous());
+ AT_ASSERT(t.is_contiguous());
tensor_proto->set_raw_data(std::string(
static_cast<char*>(t.data_ptr()),
t.type().elementSizeInBytes() * t.numel()));
+++ /dev/null
-#include <ATen/core/function_schema.h>
-
-namespace torch {
-namespace jit {
-
-using ::c10::Argument;
-using ::c10::FunctionSchema;
-
-} // namespace jit
-} // namespace torch
#include <torch/csrc/jit/fuser/codegen.h>
#include <ATen/ATen.h>
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/code_template.h>
#include <torch/csrc/jit/fuser/compiler.h>
#include <torch/csrc/jit/fuser/interface.h>
} else if (val.isBool()) {
return scalarValue(val.toBool());
} else {
- JIT_ASSERT(val.isInt());
+ AT_ASSERT(val.isInt());
return scalarValue(val.toInt());
}
}
// Acquires input values
bool has_half_tensor = false;
size_t formal_count = 0;
- for (const auto input : inputs) {
+ for (const auto& input : inputs) {
auto p = input.first;
env.s("node", valueName(p));
env.d("formal", formal_count++);
// Access for other types is common to CUDA and CPU kernels.
const auto is_half = (input.second.scalar_type == at::ScalarType::Half);
if (is_half) {
- JIT_ASSERT(use_cuda);
+ AT_ASSERT(use_cuda);
env.s(
"access",
format("__half2float(t${formal}.data[t${formal}_offset])", env));
if (n->kind() == prim::ConstantChunk)
continue;
if (n->kind() == aten::rand_like) {
- JIT_ASSERT(use_cuda);
+ AT_ASSERT(use_cuda);
has_random = true;
}
env.s("node", valueName(n->output()));
// Note: conversion to half is only supported for CUDA kernels.
const auto is_half = (output.second.scalar_type == at::ScalarType::Half);
if (is_half) {
- JIT_ASSERT(use_cuda);
+ AT_ASSERT(use_cuda);
body << format("${access} = __float2half(${node});\n", env);
has_half_tensor = true;
} else {
#include <torch/csrc/jit/fuser/compiler.h>
#include <ATen/ATen.h>
-#include <torch/csrc/jit/assertions.h>
+#include <ATen/core/jit_type.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/code_template.h>
#include <torch/csrc/jit/fuser/codegen.h>
#include <torch/csrc/jit/fuser/interface.h>
#include <torch/csrc/jit/ir.h>
#include <torch/csrc/jit/passes/canonicalize.h>
#include <torch/csrc/jit/passes/shape_analysis.h>
-#include <torch/csrc/jit/type.h>
-#include "torch/csrc/jit/fuser/interface.h"
#include <atomic>
#include <iostream>
// be a valid spec (must have had upfrontCompilation run on it).
const auto key = store(graph);
const auto maybe_retrieved_spec = retrieve(key);
- JIT_ASSERT(maybe_retrieved_spec);
+ AT_ASSERT(maybe_retrieved_spec);
upfrontCompilation(**maybe_retrieved_spec);
return key;
#pragma once
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/utils/disallow_copy.h>
namespace torch {
-
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/fuser/cpu/dynamic_library.h>
#include <torch/csrc/utils/disallow_copy.h>
}
void* DynamicLibrary::sym(const char* name) {
- JIT_ASSERT(handle);
+ AT_ASSERT(handle);
return checkDL(dlsym(handle, name));
}
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/fuser/cpu/dynamic_library.h>
#include <torch/csrc/utils/disallow_copy.h>
#include <torch/csrc/jit/fuser/cpu/fused_kernel.h>
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/code_template.h>
#include <torch/csrc/jit/fuser/compiler.h>
#include <torch/csrc/jit/fuser/cpu/dynamic_library.h>
config.openmp = false; // disable for future compiles
return runCompiler(cpp_file, so_file);
}
- JIT_ASSERTM(r == 0, "Failed to compile a fused CPU kernel");
+ AT_CHECK(r == 0, "Failed to compile a fused CPU kernel");
}
static const std::string disas_string = "objdump -M intel -d \"${so_file}\"";
env.s("so_file", so_file);
std::string cmd = format(disas_string, env);
int r = system(cmd.c_str());
- JIT_ASSERT(r == 0);
+ AT_ASSERT(r == 0);
}
FusedKernelCPU::FusedKernelCPU(
#include <ATen/ATen.h>
#include <torch/csrc/WindowsTorchApiMacro.h>
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/utils/disallow_copy.h>
#include <unistd.h>
// so we make a copy of the string here, including null terminator
std::vector<char> tt(t.c_str(), t.c_str() + t.size() + 1);
int fd = mkstemps(tt.data(), suffix);
- JIT_ASSERT(fd != -1);
+ AT_ASSERT(fd != -1);
file_ = fdopen(fd, "r+");
// - 1 because tt.size() includes the null terminator,
void write(const std::string& str) {
size_t result = fwrite(str.c_str(), 1, str.size(), file_);
- JIT_ASSERT(str.size() == result);
+ AT_ASSERT(str.size() == result);
}
FILE* file() {
TORCH_NVRTC_CHECK(nvrtcVersion(&nvrtc_major, &nvrtc_minor));
// Short-circuits if NVRTC version too low
- JIT_ASSERT(nvrtc_major >= 6);
+ AT_ASSERT(nvrtc_major >= 6);
// Major and minor is determined by device properties and
// possibly "downcompiled" to a lower (compatible) compute architecture
const at::Tensor& tensor,
const PartitionDesc& chunkDesc) {
std::vector<int64_t> sizes(tensor.sizes().begin(), tensor.sizes().end());
- JIT_ASSERT(sizes[chunkDesc.dim()] % chunkDesc.nSubTensors() == 0);
+ AT_ASSERT(sizes[chunkDesc.dim()] % chunkDesc.nSubTensors() == 0);
sizes[chunkDesc.dim()] /= chunkDesc.nSubTensors();
return sizes;
}
size_t total_size = sizes[cur];
cur++;
while (cont[cur - 1] && cur < ndim) {
- JIT_ASSERT(strides[cur - 1] == sizes[cur] * strides[cur]);
+ AT_ASSERT(strides[cur - 1] == sizes[cur] * strides[cur]);
total_size *= sizes[cur];
cur++;
}
}
if (ndim > 0)
- JIT_ASSERT(!cont.back() || strides.back() == 1);
+ AT_ASSERT(!cont.back() || strides.back() == 1);
}
// Launches the requested fusion on the given device with the given inputs.
const at::ArrayRef<at::Tensor>& inputs,
std::vector<at::Tensor>& outputs) {
// Fails if fusion and given inputs disagree
- JIT_ASSERT(inputs.size() == fusion.inputDesc().size());
+ AT_ASSERT(inputs.size() == fusion.inputDesc().size());
// Computes number of flattened inputs and outputs
size_t flat_inputs_size = 0;
// a 32-bit integer.
// Note: this code assumes that inputs are 32-bit addressable
// Note: this code assumes that all inputs are of the same size
- JIT_ASSERT(inputs[0].numel() <= std::numeric_limits<uint32_t>::max());
+ AT_ASSERT(inputs[0].numel() <= std::numeric_limits<uint32_t>::max());
// Computes map_size, numel from the first input
at::IntList map_size;
at::IntList sizes,
at::IntList strides) {
const auto nDim = desc.nDim(); // NOTE: this is the compressed dim
- JIT_ASSERT(nDim <= uncompressedDim); // We'd overflow the space otherwise
+ AT_ASSERT(nDim <= uncompressedDim); // We'd overflow the space otherwise
auto ti = reinterpret_cast<TensorInfo*>(buffer_next);
ti->data = data_ptr;
compressContiguous(
// Acquires the FusionSpec
auto maybe_spec = retrieve(key);
- JIT_ASSERT(maybe_spec);
+ AT_ASSERT(maybe_spec);
auto& spec = *(*maybe_spec);
// Acquires inputs from stack
spec.cacheKernel(arg_spec, kernel);
}
maybe_kernel = spec.findKernel(arg_spec);
- JIT_ASSERT(maybe_kernel);
+ AT_ASSERT(maybe_kernel);
// Launches fusion
std::vector<at::Tensor> outputs;
#pragma once
#include <torch/csrc/WindowsTorchApiMacro.h>
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/fuser/tensor_desc.h>
#include <cstdint>
PartitionDesc(const TensorDesc& _desc, size_t _nSubTensors, size_t _dim)
: nSubTensors_{_nSubTensors}, dim_{_dim} {
- JIT_ASSERT(nSubTensors_ > 1);
+ AT_ASSERT(nSubTensors_ > 1);
std::vector<bool> cont = _desc.contiguity;
if (dim_ > 0) {
// when we narrow the concatenated output/chunked input
#include <ATen/ATen.h>
#include <torch/csrc/WindowsTorchApiMacro.h>
-#include <torch/csrc/jit/assertions.h>
-#include <torch/csrc/jit/type.h>
+#include <c10/util/Exception.h>
+#include <ATen/core/jit_type.h>
#include <torch/csrc/utils/hash.h>
#include <algorithm>
TensorDesc(const at::Tensor& t)
: TensorDesc(t.type().scalarType(), t.sizes(), t.strides()) {}
- TensorDesc(const CompleteTensorTypePtr& type)
+ TensorDesc(const c10::CompleteTensorTypePtr& type)
: TensorDesc(type->scalarType(), type->sizes(), type->strides()) {}
// number of dimensions after contiguity compression
static std::vector<bool> findContiguous(
const at::IntList& sizes,
const at::IntList& strides) {
- JIT_ASSERT(sizes.size() == strides.size());
+ AT_ASSERT(sizes.size() == strides.size());
std::vector<bool> cont(sizes.size());
for (size_t i = 0; i < sizes.size(); ++i) {
const auto expected_stride =
#include <torch/csrc/autograd/grad_mode.h>
#include <torch/csrc/jit/argument_spec.h>
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/autodiff.h>
#include <torch/csrc/jit/custom_operator.h>
#include <torch/csrc/jit/interpreter.h>
#include <torch/csrc/jit/ir.h>
-#include <torch/csrc/jit/ivalue.h>
+#include <ATen/core/ivalue.h>
#include <torch/csrc/jit/passes/batch_mm.h>
#include <torch/csrc/jit/passes/canonicalize_ops.h>
#include <torch/csrc/jit/passes/common_subexpression_elimination.h>
}
executor.run(stack);
- JIT_ASSERT(stack.size() == num_outputs());
+ AT_ASSERT(stack.size() == num_outputs());
variable_list outputs;
outputs.reserve(num_outputs());
};
void packGradient(Gradient gradient, Node* dnode) {
- JIT_ASSERT(dnode->kind() == prim::DifferentiableGraph);
+ AT_ASSERT(dnode->kind() == prim::DifferentiableGraph);
dnode->g_(attr::Subgraph, gradient.f)
->g_(attr::ReverseSubgraph, gradient.df)
->i_(attr::f_real_outputs, gradient.f_real_outputs)
}
Gradient getGradient(const Node* n) {
- JIT_ASSERT(n->kind() == prim::DifferentiableGraph);
+ AT_ASSERT(n->kind() == prim::DifferentiableGraph);
Gradient grad;
grad.f = n->g(attr::Subgraph);
grad.df = n->g(attr::ReverseSubgraph);
}
std::shared_ptr<Graph> graphFor(const Stack& stack) const {
- JIT_ASSERT(stack.size() >= num_inputs);
+ AT_ASSERT(stack.size() >= num_inputs);
auto inputs = last(stack, num_inputs);
ArgumentSpec spec(
autograd::GradMode::is_enabled(), inputs, num_flat_inputs);
#pragma once
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
namespace torch {
namespace jit {
return cur;
}
generic_graph_node_list_iterator& operator++() {
- JIT_ASSERT(cur);
+ AT_ASSERT(cur);
cur = cur->next_in_graph[d];
return *this;
}
return old;
}
generic_graph_node_list_iterator& operator--() {
- JIT_ASSERT(cur);
+ AT_ASSERT(cur);
cur = cur->next_in_graph[reverseDir()];
return *this;
}
#include <google/protobuf/util/json_util.h>
#include <google/protobuf/util/type_resolver_util.h>
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/import.h>
#include <torch/csrc/jit/import_method.h>
#include <torch/csrc/jit/ir.h>
#include <torch/csrc/jit/argument_spec.h>
#include <torch/csrc/jit/batched/BatchTensor.h>
#include <torch/csrc/jit/export.h>
-#include <torch/csrc/jit/function_schema.h>
#include <torch/csrc/jit/fuser/interface.h>
#include <torch/csrc/jit/fuser/kernel_cache.h>
#include <torch/csrc/jit/graph_executor.h>
#include <caffe2/serialize/inline_container.h>
+#include <ATen/core/function_schema.h>
+
#include <pybind11/functional.h>
#include <memory>
namespace torch {
namespace jit {
+using ::c10::Argument;
+using ::c10::FunctionSchema;
using caffe2::serialize::PyTorchStreamReader;
using caffe2::serialize::PyTorchStreamWriter;
with_grad,
evilDeprecatedBadCreateStackDoNotUse(inputs, graph->inputs()));
auto graph_inputs = graph->inputs();
- JIT_ASSERT(spec.size() == graph_inputs.size());
+ AT_ASSERT(spec.size() == graph_inputs.size());
for (size_t i = 0; i < graph_inputs.size(); ++i) {
graph_inputs[i]->setType(spec.at(i));
}
+++ /dev/null
-#include <ATen/core/interned_strings.h>
-
-namespace torch {
-namespace jit {
-
-namespace prim {
-using namespace ::c10::prim;
-}
-namespace attr {
-using namespace ::c10::attr;
-}
-namespace aten {
-using namespace ::c10::aten;
-}
-namespace onnx {
-using namespace ::c10::onnx;
-}
-using ::c10::Symbol;
-} // namespace jit
-} // namespace torch
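With this forwarding header removed, callers include the ATen header directly and qualify (or locally re-export) the symbols they need. A minimal sketch, not part of the change:

#include <ATen/core/interned_strings.h>

using ::c10::Symbol;

void interned_strings_sketch() {
  Symbol loop = ::c10::prim::Loop;                   // a well-known built-in symbol
  Symbol add = Symbol::fromQualString("aten::add");  // interned from a string at runtime
  (void)loop;
  (void)add;
}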
#include <torch/csrc/autograd/grad_mode.h>
#include <torch/csrc/autograd/profiler.h>
#include <torch/csrc/autograd/variable.h>
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/constants.h>
#include <torch/csrc/jit/graph_executor.h>
#include <torch/csrc/jit/ir.h>
-#include <torch/csrc/jit/ivalue.h>
+#include <ATen/core/ivalue.h>
#include <torch/csrc/jit/operator.h>
#include <torch/csrc/jit/script/jit_exception.h>
#include <ATen/core/thread_pool.h>
// In other words, we find the first program point for v that
// _reverse_ dominates the definition of v, and add a drop point there.
Node* same_depth_node = findOwnerInBlock(n, v->node()->owningBlock());
- JIT_ASSERT(
+ AT_ASSERT(
same_depth_node); // failure means v is not in scope for n, use lint!
// In the case where v and n are in the same block, just mark
// jump when input is false
void createJumpFalse(int from_inst, int to_inst) {
auto& inst = instructions[from_inst];
- JIT_ASSERT(inst.debug_name == prim::Placeholder);
+ AT_ASSERT(inst.debug_name == prim::Placeholder);
auto offset = relativeJump(from_inst, to_inst);
inst.callback = [offset](Stack& stack) {
auto t = pop(stack).toBool();
// jump when input is true
void createJumpTrue(int from_inst, int to_inst) {
auto& inst = instructions[from_inst];
- JIT_ASSERT(inst.debug_name == prim::Placeholder);
+ AT_ASSERT(inst.debug_name == prim::Placeholder);
auto offset = relativeJump(from_inst, to_inst);
inst.callback = [offset](Stack& stack) {
auto t = pop(stack).toBool();
void createJump(int from_inst, int to_inst) {
auto& inst = instructions[from_inst];
- JIT_ASSERT(inst.debug_name == prim::Placeholder);
+ AT_ASSERT(inst.debug_name == prim::Placeholder);
auto offset = relativeJump(from_inst, to_inst);
inst.callback = [=](Stack& stack) { return offset; };
inst.debug_name = prim::Jump;
list.size = 0;
}
void listInsert(ListHandle<int>& list, int value) {
- JIT_ASSERTM(
+ AT_CHECK(
list.start + list.size == (int)int_data.size(),
"another list already started");
int_data.push_back(value);
list.size = 0;
}
void listInsert(ListHandle<bool>& list, int value) {
- JIT_ASSERTM(
+ AT_CHECK(
list.start + list.size == (int)bool_data.size(),
"another list already started");
bool_data.push_back(value);
void aliasRegistersTo(
ArrayRef<Value*> new_allocations,
ArrayRef<Value*> existing_allocations) {
- JIT_ASSERT(new_allocations.size() == existing_allocations.size());
+ AT_ASSERT(new_allocations.size() == existing_allocations.size());
for (size_t i = 0; i < new_allocations.size(); ++i) {
auto n = new_allocations[i]->unique();
auto e = existing_allocations[i]->unique();
- JIT_ASSERT(unique_to_reg.count(e) > 0 && unique_to_reg.count(n) == 0);
+ AT_ASSERT(unique_to_reg.count(e) > 0 && unique_to_reg.count(n) == 0);
unique_to_reg[n] = unique_to_reg[e];
}
}
size_t u = n->unique();
if (unique_to_reg.count(u) > 0)
return unique_to_reg[u];
- JIT_ASSERT(!required);
+ AT_ASSERT(!required);
int r = register_size++;
unique_to_reg[u] = r;
return r;
pc = new_pc;
} catch (Suspend& e) {
// wait() expects a single input
- JIT_ASSERT(inst.inputs.values.size == 1);
+ AT_ASSERT(inst.inputs.values.size == 1);
getOrCreateFuture();
#include <vector>
#include <torch/csrc/WindowsTorchApiMacro.h>
-#include <torch/csrc/jit/ivalue.h>
+#include <ATen/core/ivalue.h>
namespace at {
class Tensor;
struct Graph;
struct Node;
using Stack = std::vector<c10::IValue>;
+using c10::ivalue::Future;
+using c10::ivalue::Tuple;
struct TORCH_API Code {
Code() : pImpl(nullptr) {}
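For context, Stack is the interpreter's calling convention: an operation pops its inputs off the vector and pushes its outputs back in place. A minimal sketch, assuming the pop/push helpers from torch/csrc/jit/stack.h (addOp is hypothetical):

int addOp(Stack& stack) {
  // Inputs were pushed left-to-right, so pop them in reverse order.
  at::Tensor b = pop(stack).toTensor();
  at::Tensor a = pop(stack).toTensor();
  push(stack, a + b);
  return 0; // fall through to the next instruction
}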
#include <torch/csrc/jit/ir.h>
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/constants.h>
#include <torch/csrc/jit/operator.h>
#include <torch/csrc/jit/passes/python_print.h>
has_device = true;
device = type->device();
} else {
- JIT_ASSERT(device == type->device());
+ AT_ASSERT(device == type->device());
}
}
};
for (auto input : inputs_) {
// WARNING: O(n^2)
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
- JIT_ASSERT(
+ AT_ASSERT(
std::find(ALL_OF(input->uses_), Use(const_cast<Node*>(this), i)) !=
input->uses_.end());
- JIT_ASSERT(graph_->all_nodes.count(this) == 1);
+ AT_ASSERT(graph_->all_nodes.count(this) == 1);
i++;
}
}
// Use invariants
// - Use is consistent with inputs
// - Every user node is live (checked in Graph)
- JIT_ASSERT(use.user->inputs_[use.offset] == o);
+ AT_ASSERT(use.user->inputs_[use.offset] == o);
i++;
}
}
// Node subclass invariants
IR_IF(this, Constant)
- JIT_ASSERT(inputs_.size() == 0);
+ AT_ASSERT(inputs_.size() == 0);
IR_ELSEIF(Return)
// Return uses is zero
- JIT_ASSERT(outputs().size() == 0);
+ AT_ASSERT(outputs().size() == 0);
IR_ELSEIF(Param)
// Param inputs is zero
- JIT_ASSERT(inputs_.size() == 0);
+ AT_ASSERT(inputs_.size() == 0);
IR_ELSEIFM_CONST(PythonOp)
// Python operator cconv is correct
size_t n_scalars = 0, n_tensors = 0;
} else if (c == 'd') {
n_tensors++;
} else {
- JIT_ASSERT(0);
+ AT_ASSERT(0);
}
- JIT_ASSERT(static_cast<bool>(value->pyobj));
+ AT_ASSERT(static_cast<bool>(value->pyobj));
}
- JIT_ASSERT(n_scalars == value->scalar_args.size());
- JIT_ASSERT(n_tensors == inputs_.size());
+ AT_ASSERT(n_scalars == value->scalar_args.size());
+ AT_ASSERT(n_tensors == inputs_.size());
IR_ELSEIF(Eval)
// TODO: add invariants
// TODO: It's not good for these ops to be top-level, it makes cases longer.
return nodes.count(n) > 0 || (parent && parent->contains(n));
}
void insert(const Value* v) {
- JIT_ASSERT(!contains(v));
+ AT_ASSERT(!contains(v));
values.insert(v);
}
void insert(const Node* n) {
- JIT_ASSERT(!contains(n));
+ AT_ASSERT(!contains(n));
nodes.insert(n);
}
std::unique_ptr<LintScope> parent;
void check_value(const Value* v) {
scope->insert(v);
auto b2 = seen_uniques.insert(v->unique());
- JIT_ASSERT(b2.second); // insertion took place
- JIT_ASSERT(v->unique() < g.next_unique_);
+ AT_ASSERT(b2.second); // insertion took place
+ AT_ASSERT(v->unique() < g.next_unique_);
for (auto use : v->uses()) {
- JIT_ASSERT(!scope->contains(use.user));
- JIT_ASSERT(g.all_nodes.count(use.user) == 1);
+ AT_ASSERT(!scope->contains(use.user));
+ AT_ASSERT(g.all_nodes.count(use.user) == 1);
anticipated_uses[use.user]++; // int default constructs to 0
}
}
void check_node(const Node* n) {
for (auto input : n->inputs_) {
if (!scope->contains(input)) {
- JIT_ASSERTM(0, input->unique(), " not in scope");
+ AT_ASSERTM(0, input->unique(), " not in scope");
}
}
- JIT_ASSERT(
+ AT_ASSERT(
anticipated_uses[n] == static_cast<int64_t>(n->inputs_.size()));
anticipated_uses[n] = -1; // we saw the anticipated user!
scope->insert(n);
}
size_t i = 0;
for (auto o : n->outputs()) {
- JIT_ASSERT(o->node() == n);
- JIT_ASSERT(i++ == o->offset_);
+ AT_ASSERT(o->node() == n);
+ AT_ASSERT(i++ == o->offset_);
check_value(o);
}
n->lint();
}
void check_block(const Block* b) {
// Check topological ordering
- JIT_ASSERT(b->param_node()->isBefore(*b->nodes().begin()));
+ AT_ASSERT(b->param_node()->isBefore(*b->nodes().begin()));
auto curNode = *b->nodes().begin();
while (curNode != b->return_node()) {
- JIT_ASSERT(curNode->isBefore(curNode->next()));
+ AT_ASSERT(curNode->isBefore(curNode->next()));
curNode = curNode->next();
}
for (auto input : b->inputs()) {
check_value(input);
- JIT_ASSERT(input->node()->kind_ == prim::Param);
+ AT_ASSERT(input->node()->kind_ == prim::Param);
}
for (auto n : b->nodes()) {
- JIT_ASSERT(n->kind_ != prim::Param);
- JIT_ASSERT(n->kind_ != prim::Return);
+ AT_ASSERT(n->kind_ != prim::Param);
+ AT_ASSERT(n->kind_ != prim::Return);
check_node(n);
}
- JIT_ASSERT(b->output_->kind() == prim::Return);
+ AT_ASSERT(b->output_->kind() == prim::Return);
check_node(b->output_);
// all_nodes
node_set output_set{b->output_};
// TODO: Make a more type safe std::includes wrapper which disallows use
// on non-ordered containers
- JIT_ASSERT(std::includes(ALL_OF(all_nodes_set), ALL_OF(nodes_set)));
- JIT_ASSERT(std::includes(ALL_OF(all_nodes_set), ALL_OF(inputs_set)));
- JIT_ASSERT(std::includes(ALL_OF(all_nodes_set), ALL_OF(output_set)));
+ AT_ASSERT(std::includes(ALL_OF(all_nodes_set), ALL_OF(nodes_set)));
+ AT_ASSERT(std::includes(ALL_OF(all_nodes_set), ALL_OF(inputs_set)));
+ AT_ASSERT(std::includes(ALL_OF(all_nodes_set), ALL_OF(output_set)));
sum_set.insert(ALL_OF(nodes_set));
sum_set.insert(ALL_OF(inputs_set));
check_block(g.block_);
for (auto kv : anticipated_uses) {
- JIT_ASSERT(kv.second == -1);
+ AT_ASSERT(kv.second == -1);
}
- JIT_ASSERT(std::includes(ALL_OF(sum_set), ALL_OF(all_nodes_set)));
+ AT_ASSERT(std::includes(ALL_OF(sum_set), ALL_OF(all_nodes_set)));
}
};
LintImpl(*this).check_graph();
}
void Value::replaceFirstUseWith(Value* newValue) {
- JIT_ASSERT(owningGraph() == newValue->owningGraph());
+ AT_ASSERT(owningGraph() == newValue->owningGraph());
auto u = uses()[0];
u.user->inputs_[u.offset] = newValue;
newValue->uses_.push_back(u);
}
void Node::eraseOutput(size_t i) {
- JIT_ASSERT(i < outputs_.size());
- JIT_ASSERT(outputs_[i]->uses().empty());
+ AT_ASSERT(i < outputs_.size());
+ AT_ASSERT(outputs_[i]->uses().empty());
schema_ = nullptr;
Value* n = outputs_[i];
outputs_.erase(outputs_.begin() + i);
}
void Node::eraseBlock(size_t i) {
- JIT_ASSERT(i < blocks_.size());
+ AT_ASSERT(i < blocks_.size());
schema_ = nullptr;
Block* n = blocks_[i];
blocks_.erase(blocks_.begin() + i);
}
void Node::replaceAllUsesWith(Node* n) {
- JIT_ASSERT(outputs().size() == n->outputs().size());
+ AT_ASSERT(outputs().size() == n->outputs().size());
size_t nOutputs = outputs().size();
for (size_t i = 0; i < nOutputs; i++) {
outputs()[i]->replaceAllUsesWith(n->outputs()[i]);
}
Value* Node::insertInput(size_t i, Value* value) {
- JIT_ASSERT(graph_ == value->owningGraph());
+ AT_ASSERT(graph_ == value->owningGraph());
schema_ = nullptr;
// First we update the offsets for all existing inputs that will reside
// after the one we're inserting. Concretely, these are the inputs at
}
Value* Node::addInput(Value* value) {
- JIT_ASSERT(graph_ == value->owningGraph());
+ AT_ASSERT(graph_ == value->owningGraph());
schema_ = nullptr;
value->uses_.emplace_back(this, inputs_.size());
inputs_.push_back(value);
}
Value* Node::replaceInput(size_t i, Value* newValue) {
- JIT_ASSERT(newValue->owningGraph() == graph_);
+ AT_ASSERT(newValue->owningGraph() == graph_);
schema_ = nullptr;
Value* old = dropInput(i);
inputs_[i] = newValue;
}
void Node::replaceInputWith(Value* from, Value* to) {
- JIT_ASSERT(from->owningGraph() == graph_);
- JIT_ASSERT(to->owningGraph() == graph_);
+ AT_ASSERT(from->owningGraph() == graph_);
+ AT_ASSERT(to->owningGraph() == graph_);
schema_ = nullptr;
size_t i = 0;
for (auto input : inputs()) {
return this->topo_position_ > n->topo_position_;
}
- JIT_ASSERT(this == n);
+ AT_ASSERT(this == n);
return false;
}
// until we find the first common block.
auto lhs = this;
while (lhs) {
- JIT_ASSERT(lhs->owningBlock());
+ AT_ASSERT(lhs->owningBlock());
auto rhs = n;
while (rhs) {
lhs = lhs->owningBlock()->owningNode();
}
// should never reach here, since both nodes are ultimately in the same graph
- JIT_ASSERT(false);
+ AT_ASSERT(false);
}
bool Node::isBefore(const Node* n) const {
}
Node* Node::insertBefore(Node* n) {
- JIT_ASSERT(n->inBlockList());
+ AT_ASSERT(n->inBlockList());
insertAfter(n->prev());
return this;
}
Node* Node::insertAfter(Node* n) {
- JIT_ASSERT(!inBlockList() && n->inBlockList());
- JIT_ASSERT(n->owningBlock());
+ AT_ASSERT(!inBlockList() && n->inBlockList());
+ AT_ASSERT(n->owningBlock());
this->owning_block_ = n->owningBlock();
Node* next = n->next();
n->next() = this;
// O(N) on the use list, but unless we get nodes with +100 uses
// vector traversal still is probably faster than linked list
auto use_it = std::find(input_uses.begin(), input_uses.end(), Use(this, i));
- JIT_ASSERT(use_it != input_uses.end());
+ AT_ASSERT(use_it != input_uses.end());
return use_it;
}
Value* Node::dropInput(size_t i) {
- JIT_ASSERT(i < inputs_.size());
+ AT_ASSERT(i < inputs_.size());
auto input_node = inputs_[i];
auto use_it = findUseForInput(i);
input_node->uses_.erase(use_it);
}
void Node::removeFromList() {
- JIT_ASSERT(inBlockList());
+ AT_ASSERT(inBlockList());
this->owning_block_ = nullptr;
Node* next = this->next();
Node* prev = this->prev();
Node* Graph::createList(const TypePtr& elem_type, at::ArrayRef<Value*> values) {
auto n = create(prim::ListConstruct, values);
for (const auto& v : values) {
- JIT_ASSERT(v->type()->isSubtypeOf(elem_type));
+ AT_ASSERT(v->type()->isSubtypeOf(elem_type));
}
n->output()->setType(ListType::create(elem_type));
return n;
void Graph::freeNode(Node* n) {
auto it = all_nodes.find(n);
- JIT_ASSERT(it != all_nodes.end());
+ AT_ASSERT(it != all_nodes.end());
delete *it;
all_nodes.erase(it);
}
void Graph::freeValue(Value* v) {
v->setUniqueName("");
auto it = all_values.find(v);
- JIT_ASSERT(it != all_values.end());
+ AT_ASSERT(it != all_values.end());
delete *it;
all_values.erase(it);
}
void Graph::freeBlock(Block* b) {
auto it = all_blocks.find(b);
- JIT_ASSERT(it != all_blocks.end());
+ AT_ASSERT(it != all_blocks.end());
delete *it;
all_blocks.erase(it);
}
bool unpack_outputs) {
std::unordered_map<Value*, Value*> value_map;
auto value_map_func = [&](Value* v) { return value_map.at(v); };
- JIT_ASSERT(callee.inputs().size() == inputs.size());
+ AT_ASSERT(callee.inputs().size() == inputs.size());
for (size_t i = 0; i < inputs.size(); ++i) {
value_map[callee.inputs()[i]] = inputs[i];
}
#pragma once
-#include <torch/csrc/jit/assertions.h>
#include <torch/csrc/jit/attributes.h>
-#include <torch/csrc/jit/constants.h>
-#include <torch/csrc/jit/function_schema.h>
#include <torch/csrc/jit/generic_if.h>
#include <torch/csrc/jit/graph_node_list.h>
-#include <torch/csrc/jit/interned_strings.h>
-#include <torch/csrc/jit/ivalue.h>
#include <torch/csrc/jit/named_value.h>
#include <torch/csrc/jit/resource_guard.h>
#include <torch/csrc/jit/scope.h>
-#include <torch/csrc/jit/type.h>
#include <torch/csrc/WindowsTorchApiMacro.h>
#include <torch/csrc/utils/disallow_copy.h>
#include <torch/csrc/utils/object_ptr.h>
#include <ATen/ATen.h>
+#include <ATen/core/function_schema.h>
+#include <ATen/core/interned_strings.h>
+#include <ATen/core/ivalue.h>
+#include <ATen/core/jit_type.h>
#include <c10/util/ArrayRef.h>
+#include <c10/util/Exception.h>
#include <algorithm>
#include <atomic>
namespace torch {
namespace jit {
+using ::c10::Symbol;
+using ::c10::Argument;
+using ::c10::FunctionSchema;
+
+using ::c10::ivalue::List;
+using ::c10::ivalue::Shared;
+
+using ::c10::IValue;
+using ::c10::ivalue::Future;
+using ::c10::ivalue::Tuple;
+
+using ::c10::ivalue::BoolList;
+using ::c10::ivalue::DoubleList;
+using ::c10::ivalue::GenericList;
+using ::c10::ivalue::IntList;
+using ::c10::ivalue::TensorList;
+
+using ::c10::ivalue::ConstantString;
+
+#define C10_USING(T) using ::c10::T;
+C10_FORALL_TYPES(C10_USING)
+#undef C10_USING
+
+#define C10_USING(T) using ::c10::T##Ptr;
+C10_FORALL_TYPES(C10_USING)
+#undef C10_USING
+
+using ::c10::Type;
+using ::c10::TypeEnv;
+using ::c10::TypePtr;
+
+using ::c10::getTypePtr;
+using ::c10::MatchTypeReturn;
+using ::c10::TypeKind;
+
+namespace prim {
+using namespace ::c10::prim;
+}
+namespace attr {
+using namespace ::c10::attr;
+}
+namespace aten {
+using namespace ::c10::aten;
+}
+
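The C10_USING define/undef pairs above are X-macros: C10_FORALL_TYPES applies its macro argument to every type name in the list, so each pass generates one using-declaration per type. Schematically (the three names below are placeholders, not the full list):

// If C10_FORALL_TYPES(_) were defined as _(TensorType) _(ListType) _(TupleType),
// then with #define C10_USING(T) using ::c10::T;
//   C10_FORALL_TYPES(C10_USING)
// expands to
//   using ::c10::TensorType; using ::c10::ListType; using ::c10::TupleType;
// and the second pass generates the matching *Ptr aliases.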
// Graph represents one "function" of computation.
// It uses a simple ownership model where the graph owns all the nodes inside
// it. All references inside the graph are raw pointers. Destroying the Graph
setType(CompleteTensorType::create(output));
}
const TypePtr& type() const {
- JIT_ASSERT(type_ != nullptr);
+ AT_ASSERT(type_ != nullptr);
return type_;
}
bool requires_grad() const {
// lots of things like chunk have a single input or single output, so we have
// a helper to make accessing it easier
Value* input() {
- JIT_ASSERT(inputs_.size() == 1);
+ AT_ASSERT(inputs_.size() == 1);
return inputs_.at(0);
}
Value* output() {
- JIT_ASSERT(outputs_.size() == 1);
+ AT_ASSERT(outputs_.size() == 1);
return outputs_.at(0);
}
const Value* output() const {
- JIT_ASSERT(outputs_.size() == 1);
+ AT_ASSERT(outputs_.size() == 1);
return outputs_.at(0);
}
const Value* input() const {
- JIT_ASSERT(inputs_.size() == 1);
+ AT_ASSERT(inputs_.size() == 1);
return inputs_.at(0);
}
// Access a particular input. This is a checked index.
}
template <typename T>
T* expect() {
- JIT_ASSERTM(
+ AT_CHECK(
T::Kind == kind(),
"expected a ",
T::Kind.toDisplayString(),
}
}
bool hasAttribute(Symbol name) const {
- JIT_ASSERT(name.is_attr());
+ AT_ASSERT(name.is_attr());
return findAttr(name, false) != values_.end();
}
bool hasAttributeS(const std::string& name) const {
return hasAttribute(Symbol::attr(name));
}
AttributeKind kindOf(Symbol name) const {
- JIT_ASSERT(name.is_attr());
+ AT_ASSERT(name.is_attr());
return (*findAttr(name, true))->kind();
}
AttributeKind kindOfS(const std::string& name) const {
return kindOf(Symbol::attr(name));
}
Node* removeAttribute(Symbol name) {
- JIT_ASSERT(name.is_attr());
+ AT_ASSERT(name.is_attr());
values_.erase(findAttr(name, true));
return this;
}
// does not use CREATE_ACCESSOR because we need additional asserts
Node* t_(Symbol name, TensorAttr::ConstructorType v) {
- JIT_ASSERT(!v.defined() || !v.is_variable());
+ AT_ASSERT(!v.defined() || !v.is_variable());
return setAttr<TensorAttr>(name, std::forward<TensorAttr::ConstructorType>(v));
}
const TensorAttr::ValueType& t(Symbol name) const {
Node* ts_(Symbol name, TensorsAttr::ConstructorType v) {
for (auto& t : v) {
- JIT_ASSERT(!t.defined() || !t.is_variable());
+ AT_ASSERT(!t.defined() || !t.is_variable());
}
return setAttr<TensorsAttr>(
name, std::forward<TensorsAttr::ConstructorType>(v));
template <typename T>
Node* setAttr(Symbol name, typename T::ConstructorType v) {
- JIT_ASSERT(name.is_attr());
+ AT_ASSERT(name.is_attr());
auto it = findAttr(name, false);
auto nv = AVPtr(new T(name, std::forward<typename T::ConstructorType>(v)));
if (it == values_.end()) {
}
template <typename T>
typename T::ValueType& getAttr(Symbol name) const {
- JIT_ASSERT(name.is_attr());
+ AT_ASSERT(name.is_attr());
auto it = findAttr(name, true);
auto* child = dynamic_cast<T*>(it->get());
if (child == nullptr) {
// a big pile of messages.
std::vector<AVPtr> values_;
std::vector<AVPtr>::iterator findAttr(Symbol name, bool required) {
- JIT_ASSERT(name.is_attr());
+ AT_ASSERT(name.is_attr());
auto it = std::find_if(values_.begin(), values_.end(), [&](const AVPtr& v) {
return v->name == name;
});
if (required && it == values_.end()) {
throw AttributeError(name, false);
}
- JIT_ASSERT(!required || it != values_.end());
+ AT_ASSERT(!required || it != values_.end());
return it;
}
std::vector<AVPtr>::const_iterator findAttr(Symbol name, bool required) const {
- JIT_ASSERT(name.is_attr());
+ AT_ASSERT(name.is_attr());
auto it = std::find_if(values_.begin(), values_.end(), [&](const AVPtr& v) {
return v->name == name;
});
if (required && it == values_.end()) {
throw AttributeError(name, false);
}
- JIT_ASSERT(!required || it != values_.end());
+ AT_ASSERT(!required || it != values_.end());
return it;
}
bool inBlockList() const {
if (next() == nullptr) {
- JIT_ASSERT(prev() == nullptr);
+ AT_ASSERT(prev() == nullptr);
}
return next() != nullptr;
}
output_->removeInput(i);
}
Node* appendNode(Node* n) {
- JIT_ASSERT(n->graph_ == graph_ && !n->inBlockList());
+ AT_ASSERT(n->graph_ == graph_ && !n->inBlockList());
n->insertBefore(output_);
return n;
}
Node* prependNode(Node* n) {
- JIT_ASSERT(n->graph_ == graph_ && !n->inBlockList());
+ AT_ASSERT(n->graph_ == graph_ && !n->inBlockList());
n->insertAfter(output_);
return n;
}
// initialized to insert at the end of the top level block
// can be changed with setInsertPoint()
Node* insertNode(Node* n) {
- JIT_ASSERT(
+ AT_ASSERT(
insert_before_->inBlockList() &&
"insert point node is no longer in a block list");
return n->insertBefore(insert_before_);
}
// set where nodes are inserted to append to the end of this block
void setInsertPoint(Block* b) {
- JIT_ASSERT(b->owningGraph() == this);
+ AT_ASSERT(b->owningGraph() == this);
insert_before_ = b->return_node();
}
// set where nodes are inserted to insert _before_ this node
// for implementation simplicity we only support inserting before a node for
// now
void setInsertPoint(Node* n) {
- JIT_ASSERT(n->owningGraph() == this && n->inBlockList());
+ AT_ASSERT(n->owningGraph() == this && n->inBlockList());
insert_before_ = n;
}
Node* insertPoint() {
}
inline Value* Value::setType(TypePtr type) {
- JIT_ASSERT(type);
+ AT_ASSERT(type);
type_ = std::move(type);
for (Use& use : uses_) {
use.user->schema_ = nullptr;
// execute a Python function, used for Ops we can't optimize but that we want to
// optimize around
struct PythonOp : public Node {
- static constexpr Symbol Kind = prim::PythonOp;
+ static constexpr Symbol Kind = ::c10::prim::PythonOp;
- PythonOp(Graph* graph) : Node(graph, prim::PythonOp) {}
+ PythonOp(Graph* graph) : Node(graph, ::c10::prim::PythonOp) {}
PythonOp* init(
THPObjectPtr&& pyobj,
const std::string& cconv,
struct IfView {
explicit IfView(Node* node) : node_(node) {
- JIT_ASSERT(node->kind() == prim::If);
+ AT_ASSERT(node->kind() == ::c10::prim::If);
}
Value* cond() const {
return node_->input(0);
struct LoopView {
explicit LoopView(Node* node) : node_(node) {
- JIT_ASSERT(node->kind() == prim::Loop || node->kind() == onnx::Loop);
+ AT_ASSERT(
+ node->kind() == ::c10::prim::Loop || node->kind() == ::c10::onnx::Loop);
}
Block* bodyBlock() const {
return node_->blocks().at(0);
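Since Symbol and the interned namespaces now live in c10, kind comparisons are spelled with the fully qualified ::c10::prim / ::c10::onnx names (or re-exported, as later hunks do). A small usage sketch for the views, assuming a well-formed node n:

    if (n->kind() == ::c10::prim::If) {
      IfView view(n);
      Value* cond = view.cond(); // input 0 is the branch condition
    }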
+++ /dev/null
-#pragma once
-#include <ATen/core/ivalue.h>
-
-namespace torch {
-namespace jit {
-
-using ::c10::ivalue::List;
-using ::c10::ivalue::Shared;
-
-using ::c10::IValue;
-using ::c10::ivalue::Future;
-using ::c10::ivalue::Tuple;
-
-using ::c10::ivalue::BoolList;
-using ::c10::ivalue::DoubleList;
-using ::c10::ivalue::GenericList;
-using ::c10::ivalue::IntList;
-using ::c10::ivalue::TensorList;
-
-using ::c10::ivalue::ConstantString;
-
-} // namespace jit
-} // namespace torch
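The deleted header was a pure re-export shim, so callers migrate by swapping the include and relying on the c10 names directly, e.g.:

    #include <ATen/core/ivalue.h> // was <torch/csrc/jit/ivalue.h>
    using ::c10::IValue;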
#pragma once
#include <ATen/ATen.h>
#include <torch/csrc/jit/constants.h>
-#include <torch/csrc/jit/ivalue.h>
+#include <ATen/core/ivalue.h>
#include <torch/csrc/jit/source_range.h>
#include <torch/csrc/utils/variadic.h>
}
const std::string& name() const {
- JIT_ASSERT(name_);
+ AT_ASSERT(name_);
return *name_;
}
#include <algorithm>
#include <unordered_map>
-#include <torch/csrc/jit/assertions.h>
-#include <torch/csrc/jit/interned_strings.h>
+#include <c10/util/Exception.h>
+#include <ATen/core/interned_strings.h>
#include <torch/csrc/jit/node_hashing.h>
#include <torch/csrc/jit/passes/common_subexpression_elimination.h>
#include <torch/csrc/utils/functional.h>
// This function may be too conservative for general use.
// Do NOT support g/gs attributes.
bool attributesEqualCSE(const Node* lhs, const Node* rhs) {
- JIT_ASSERT(lhs != nullptr);
- JIT_ASSERT(rhs != nullptr);
+ AT_ASSERT(lhs != nullptr);
+ AT_ASSERT(rhs != nullptr);
// One has attributes, the other does not.
if (lhs->hasAttributes() != rhs->hasAttributes())
return false;
} // anonymous namespace
size_t HashNode::operator()(const Node* k) const {
- JIT_ASSERT(k != nullptr);
+ AT_ASSERT(k != nullptr);
return get_hash(
k->kind(),
fmap(k->outputs(), [](const Value* v) { return v->type()->kind(); }),
}
}
#endif
- JIT_ASSERTM(
+ AT_CHECK(
op_ptr_it != operators_by_sig.end(),
"Couldn't find an operator for ",
name);
// it now to implement correct semantic checking for script
#pragma once
-#include <torch/csrc/jit/assertions.h>
-#include <torch/csrc/jit/function_schema.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/ir.h>
#include <torch/csrc/jit/stack.h>
#include <ATen/ATen.h>
+#include <ATen/core/function_schema.h>
#include <functional>
#include <initializer_list>
namespace torch {
namespace jit {
+using ::c10::FunctionSchema;
+
TORCH_API FunctionSchema parseSchema(const std::string& schema);
using OperationCreator = std::function<Operation(const Node*)>;
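FunctionSchema is now the c10 type, but parseSchema keeps the same schema-string format. A hedged usage sketch using the standard aten::add signature:

    FunctionSchema s = parseSchema(
        "aten::add(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor");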
const auto value = pr.first;
const auto& aliasInfo = pr.second;
// We don't support composite types yet
- JIT_ASSERT(aliasInfo.containedTypes().size() == 0);
+ AT_ASSERT(aliasInfo.containedTypes().size() == 0);
for (const auto aliasSet : aliasInfo.sets()) {
aliasToValue_[aliasSet].insert(value);
}
}
const auto& aliasInfo = valueToAlias_.at(v);
- JIT_ASSERT(aliasInfo.sets().size() > 0);
+ AT_ASSERT(aliasInfo.sets().size() > 0);
// We only need to check one alias set, since if this value belongs to
// multiple alias sets they are all written to
const auto& aliasSet = *aliasInfo.sets().begin();
}
addAlias(input, tupleTypeAliases.at(tupleType));
} else {
- JIT_ASSERT(!shouldAnnotate(input));
+ AT_ASSERT(!shouldAnnotate(input));
}
}
}
// We don't support composite types for alias analysis yet.
- JIT_ASSERT(formal->containedTypes().size() == 0);
+ AT_ASSERT(formal->containedTypes().size() == 0);
// TODO neither unions nor wildcards make sense on an input. We should
// disallow them in function schema
- JIT_ASSERT(!formal->isWildcard())
+ AT_ASSERT(!formal->isWildcard());
const auto& formalAlias = formal->set();
// skip if we've already bound this alias
}
// We don't support composite types for alias analysis yet.
- JIT_ASSERT(formal->containedTypes().size() == 0);
+ AT_ASSERT(formal->containedTypes().size() == 0);
const auto& formalAlias = formal->set();
auto outputAlias = formalToActual.at(formalAlias);
const auto loopCarriedInputs = node->inputs().slice(2); // skip max, cond
const auto blockInputs = bodyBlock->inputs().slice(1); // skip trip
const auto blockOutputs = bodyBlock->outputs().slice(1); // skip trip
- JIT_ASSERT(loopCarriedInputs.size() == blockInputs.size());
- JIT_ASSERT(blockOutputs.size() == node->outputs().size());
+ AT_ASSERT(loopCarriedInputs.size() == blockInputs.size());
+ AT_ASSERT(blockOutputs.size() == node->outputs().size());
// Run alias analysis on the loop body, iterating until the block output
// alias info converges.
// Check whether or not this would change anything
if (valueToAlias_.count(input) != 0) {
- JIT_ASSERT(valueToAlias_.count(output) != 0)
+ AT_ASSERT(valueToAlias_.count(output) != 0);
if (!valueToAlias_[output].isSubsetOf(valueToAlias_[input])) {
notConverged = true;
}
// TODO(suo): the subgraph outputs and node outputs are NOT NECESSARILY the
// same length. Autodifferentiation may capture additional outputs in the
// subgraph block.
- JIT_ASSERT(subgraphBlock->outputs().size() >= node->outputs().size());
+ AT_ASSERT(subgraphBlock->outputs().size() >= node->outputs().size());
for (size_t i = 0; i < node->outputs().size(); i++) {
addAlias(node->outputs()[i], subgraphBlock->outputs()[i]);
}
// Union the alias info of `value` with `from`
void AliasDb::addAlias(const Value* value, const Value* from) {
if (!shouldAnnotate(value)) {
- JIT_ASSERT(!shouldAnnotate(from));
+ AT_ASSERT(!shouldAnnotate(from));
return;
}
addAlias(value, valueToAlias_.at(from));
}
void AliasDb::mapAliases(at::ArrayRef<Value*> to, at::ArrayRef<Value*> from) {
- JIT_ASSERT(to.size() == from.size());
+ AT_ASSERT(to.size() == from.size());
for (size_t i = 0; i < to.size(); i++) {
addAlias(to[i], from[i]);
}
// outside), then return nullptr. Since we can only reorder nodes within a
// block, `target` would be irrelevant.
static Node* findSameBlock(Node* target, Node* n) {
- JIT_ASSERT(target->owningGraph() == n->owningGraph());
+ AT_ASSERT(target->owningGraph() == n->owningGraph());
if (target->owningBlock() == n->owningBlock()) {
return target;
} else {
Node* movePoint,
MoveSide moveSide,
bool dryRun) {
- JIT_ASSERT(toMove->owningBlock() == movePoint->owningBlock());
+ AT_ASSERT(toMove->owningBlock() == movePoint->owningBlock());
if (toMove == movePoint) {
return true;
}
}
// 3. Execute the move
- JIT_ASSERT(curNode == movePoint);
+ AT_ASSERT(curNode == movePoint);
if (splitToMoveAndDeps) {
// Move `toMove`
move(toMove, movePoint, moveSide);
}
lhs = subgraphToOwner_.at(lhs->owningGraph());
}
- JIT_ASSERT(false);
+ AT_ASSERT(false);
}
} // namespace jit
} // namespace torch
#include <torch/csrc/jit/passes/batch_mm.h>
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/constants.h>
#include <torch/csrc/jit/custom_operator.h>
-#include <torch/csrc/jit/interned_strings.h>
+#include <ATen/core/interned_strings.h>
#include <torch/csrc/jit/passes/alias_analysis.h>
#include <torch/csrc/jit/passes/dead_code_elimination.h>
#include <torch/csrc/jit/passes/peephole.h>
}
drop(stack, num_inputs);
- JIT_ASSERT(inputs.size() > 0);
- JIT_ASSERT(inputs.size() % 2 == 0);
+ AT_ASSERT(inputs.size() > 0);
+ AT_ASSERT(inputs.size() % 2 == 0);
size_t side_num_elems = inputs.size() / 2;
auto lhs_inputs = at::TensorList(inputs).slice(0, side_num_elems);
auto rhs_inputs = at::TensorList(inputs).slice(side_num_elems);
matmuls.push_back(n);
} else if (n->matches("aten::t(Tensor self) -> Tensor")) {
Node* input_node = n->input()->node();
- JIT_ASSERT(input_node->matches(
+ AT_ASSERT(input_node->matches(
"aten::mm(Tensor self, Tensor mat2) -> Tensor"));
// (AB)^T == B^TA^T
WithInsertPoint insert_guard{input_node};
// NB: 8 is the current loop unrolling factor
static constexpr size_t how_many_is_many = 8;
const auto batch_side = [&](std::vector<Node*>& mms, Side side) {
- JIT_ASSERT(!mms.empty());
+ AT_ASSERT(!mms.empty());
for (int64_t i = static_cast<int64_t>(mms.size()) - 2; i >= 0; --i) {
bool move_ok = alias_db.moveBeforeTopologicallyValid(mms[i], mms[i + 1]);
- JIT_ASSERT(move_ok);
+ AT_ASSERT(move_ok);
}
WithInsertPoint insert_guard{mms[0]};
Graph* graph = mms[0]->owningGraph();
#include <algorithm>
#include <unordered_map>
-#include <torch/csrc/jit/assertions.h>
-#include <torch/csrc/jit/interned_strings.h>
+#include <c10/util/Exception.h>
+#include <ATen/core/interned_strings.h>
#include <torch/csrc/jit/node_hashing.h>
#include <torch/csrc/jit/passes/alias_analysis.h>
#include <torch/csrc/jit/passes/common_subexpression_elimination.h>
-#include <torch/csrc/jit/interned_strings.h>
+#include <ATen/core/interned_strings.h>
#include <torch/csrc/jit/ir.h>
#include <torch/csrc/jit/node_hashing.h>
#include <torch/csrc/jit/passes/constant_pooling.h>
#include <torch/csrc/jit/constants.h>
#include <torch/csrc/jit/interpreter.h>
#include <torch/csrc/jit/ir.h>
-#include <torch/csrc/jit/ivalue.h>
+#include <ATen/core/ivalue.h>
#include <torch/csrc/jit/operator.h>
#include <torch/csrc/jit/passes/alias_analysis.h>
#include <torch/csrc/jit/passes/dead_code_elimination.h>
void inlineIf(Node* n, const AliasDb& aliasDb) {
auto input_bool = constant_as<bool>(n->input());
- JIT_ASSERT(input_bool);
+ AT_ASSERT(input_bool);
size_t block_index = *input_bool ? 0 : 1;
ConstantPropagation(n->blocks().at(block_index), aliasDb);
inlineIfBody(n->blocks().at(block_index));
// remove extra outputs from the node
bool removeExtraIfOutputs(Node* n) {
- JIT_ASSERTM(n->kind() == prim::If, "Only supported for If nodes");
+ AT_CHECK(n->kind() == prim::If, "Only supported for If nodes");
auto true_block = n->blocks()[0];
auto false_block = n->blocks()[1];
auto initial_outputs = true_block->outputs().size();
#include <torch/csrc/jit/passes/create_autodiff_subgraphs.h>
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/autodiff.h>
#include <torch/csrc/jit/ir.h>
#include <torch/csrc/jit/passes/alias_analysis.h>
//
// Returns true if an inlining has occurred, false otherwise.
bool inlineIfTooSmall(Node* n) {
- JIT_ASSERT(n->kind() == prim::DifferentiableGraph);
+ AT_ASSERT(n->kind() == prim::DifferentiableGraph);
auto subgraph = SubgraphUtils::getSubgraph(n);
size_t i = 0;
for (auto it = subgraph->nodes().begin(); it != subgraph->nodes().end();
Node* consumer,
Node* producer,
AliasDb& aliasDb) {
- JIT_ASSERT(consumer->kind() == prim::DifferentiableGraph);
+ AT_ASSERT(consumer->kind() == prim::DifferentiableGraph);
bool canMerge = shouldConsiderForMerge(producer) &&
aliasDb.moveBeforeTopologicallyValid(producer, consumer);
namespace torch {
namespace jit {
+namespace prim {
+using namespace ::c10::prim;
+}
+
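This nested-namespace using-directive is the compatibility shim used throughout the diff: existing unqualified spellings like prim::DifferentiableGraph keep compiling, but lookup now resolves to the interned symbols in ::c10::prim. A minimal sketch of the pattern:

    namespace torch { namespace jit {
    namespace prim { using namespace ::c10::prim; }
    // prim::If, prim::Loop, ... now name the ::c10::prim symbols.
    }} // namespace torch::jit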
class DeadCodeEliminator {
public:
explicit DeadCodeEliminator(std::shared_ptr<Graph> graph)
return;
}
- JIT_ASSERT(node->owningBlock()->return_node() == node);
+ AT_ASSERT(node->owningBlock()->return_node() == node);
auto outerNode = node->owningBlock()->owningNode();
if (outerNode == nullptr || outerNode->kind() == prim::Reverse) {
// If there's no outer node, we're looking at the graph's top-level
}
// Collect all inputs that are actually live
- if (outerNode->kind() == prim::Loop || outerNode->kind() == onnx::Loop) {
+ if (outerNode->kind() == prim::Loop ||
+ outerNode->kind() == c10::onnx::Loop) {
// Special handling to deal with loop carried dependencies.
auto loop = LoopView(outerNode);
for (size_t i = 0; i < loop.carriedOutputs().size(); i++) {
// the loop body.
liveValues_.insert(loop.nextCond());
} else {
- JIT_ASSERT(outerNode->outputs().size() == node->inputs().size());
+ AT_ASSERT(outerNode->outputs().size() == node->inputs().size());
for (size_t i = 0; i < outerNode->outputs().size(); i++) {
auto innerOutput = node->inputs()[i];
auto outerOutput = outerNode->outputs()[i];
#include <torch/csrc/jit/passes/graph_fuser.h>
#include <ATen/ExpandUtils.h>
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/autodiff.h>
#include <torch/csrc/jit/custom_operator.h>
#include <torch/csrc/jit/fuser/interface.h>
const int64_t ndim = pop(stack).toInt();
auto self = pop(stack).toTensor();
c10::SmallVector<int64_t, 8> sizes(ndim, 1);
- JIT_ASSERT(self.dim() == 1);
+ AT_ASSERT(self.dim() == 1);
sizes.at(1) = self.size(0);
push(stack, self.reshape(sizes));
return 0;
}
Value* broadcastSizes(at::ArrayRef<Value*> sizes) {
- JIT_ASSERT(!sizes.empty());
+ AT_ASSERT(!sizes.empty());
Graph* graph = sizes[0]->owningGraph();
Node* broadcast_n =
graph->insertNode(graph->create(prim::BroadcastSizes, sizes));
}
Graph& getSubgraph(Node* n) {
- JIT_ASSERT(n->kind() == prim::FusionGroup);
+ AT_ASSERT(n->kind() == prim::FusionGroup);
return *n->g(attr::Subgraph);
}
},
&bn_graph);
- JIT_ASSERT(isFusableBatchNorm(batch_norm));
+ AT_ASSERT(isFusableBatchNorm(batch_norm));
WithInsertPoint insert_guard{batch_norm};
Value* input = batch_norm->namedInput(attr::input);
Value* input_dim = graph_->insert(aten::dim, {input});
// DOES NOT WORK if n is a consumer of an output of the fusion group
// returns the node _inside_ the group that represents the node
Node* mergeNodeIntoGroup(Node* group, Node* n) {
- JIT_ASSERT(n->kind() != prim::FusionGroup);
+ AT_ASSERT(n->kind() != prim::FusionGroup);
auto& subgraph = getSubgraph(group);
// map from nodes in the surrounding graph to parameters in the fusion
// group's subgraph that correspond to them
std::unordered_map<Value*, Value*> inputs_map;
size_t i = 0;
- JIT_ASSERT(group->inputs().size() == subgraph.inputs().size());
+ AT_ASSERT(group->inputs().size() == subgraph.inputs().size());
for (auto input : group->inputs()) {
inputs_map[input] = subgraph.inputs()[i++];
}
// so we generally don't allow fusing tensor-scalar operations unless
// the scalar is constant. In those cases we inline the constants
// directly in the body of the fused group.
- JIT_ASSERT(input->node()->kind() == prim::Constant);
+ AT_ASSERT(input->node()->kind() == prim::Constant);
Node* in_const =
subgraph.createClone(input->node(), [](Value*) -> Value* {
throw std::runtime_error("unexpected input");
mergeFusionGroups(group, producer->node());
return group;
}
- JIT_ASSERT(producer->node()->outputs().size() == 1);
+ AT_ASSERT(producer->node()->outputs().size() == 1);
Node* merged = mergeNodeIntoGroup(group, producer->node());
// remaining uses of this producer can occur because we allow
// fusion in cases where uses remain after the consumer
}
c10::optional<Node*> findFusedChunk(Node* group, Value* input) {
- JIT_ASSERT(group->kind() == prim::FusionGroup);
+ AT_ASSERT(group->kind() == prim::FusionGroup);
auto it = std::find(group->inputs().begin(), group->inputs().end(), input);
if (it == group->inputs().end()) {
return c10::nullopt;
// If subgraph_input is an input to prim::ConstantChunk, it will have 1 use
auto* node = subgraph_input->uses().at(0).user;
if (node->kind() == prim::ConstantChunk) {
- JIT_ASSERT(subgraph_input->uses().size() == 1);
+ AT_ASSERT(subgraph_input->uses().size() == 1);
return node;
}
return c10::nullopt;
// input.
graph_node_list::iterator fuseChunk(Node* consumer, Value* producer) {
auto* chunk = producer->node();
- JIT_ASSERT(consumer->kind() == prim::FusionGroup);
- JIT_ASSERT(chunk->kind() == prim::ConstantChunk);
+ AT_ASSERT(consumer->kind() == prim::FusionGroup);
+ AT_ASSERT(chunk->kind() == prim::ConstantChunk);
// if producer's input is already an input to a prim::ConstantChunk node,
// we cannot add a new prim::ConstantChunk node because of invariant (2).
auto new_tensors_it = new_tensors.begin();
for (size_t i = 0; i < node->inputs().size(); ++i) {
if (node->inputs()[i]->type()->isSubtypeOf(DynamicType::get())) {
- JIT_ASSERT(new_tensors_it != new_tensors.end());
+ AT_ASSERT(new_tensors_it != new_tensors.end());
node->replaceInput(i, *(new_tensors_it++));
}
}
}
Node* promoteChunkToBroadcastingChunk(Node* chunk) {
- JIT_ASSERT(chunk->kind() == prim::ConstantChunk);
+ AT_ASSERT(chunk->kind() == prim::ConstantChunk);
size_t nchunks = chunk->i(attr::chunks);
Node* bchunk =
}
// multiple return operators
Node* producer_for_chunk_node = producer_for_chunk->node();
- JIT_ASSERT(producer_for_chunk_node->outputs().size() == 1);
+ AT_ASSERT(producer_for_chunk_node->outputs().size() == 1);
// Convert chunk to bchunk, if it isn't one already. The bchunk represents a
// broadcast and one or more chunk operations.
auto chunked_inputs_it = chunked_inputs.begin();
for (Value* original_input : original_inputs) {
if (original_input->type()->isSubtypeOf(DynamicType::get())) {
- JIT_ASSERT(chunked_inputs_it != chunked_inputs.end());
+ AT_ASSERT(chunked_inputs_it != chunked_inputs.end());
chunked_op->addInput(
chunked_inputs_it->at(chunk_sel->offset() % nchunks));
++chunked_inputs_it;
auto tensor_sizes = fmap(tensor_inputs, [](Value* v) {
return v->owningGraph()->insert(aten::size, {v});
});
- JIT_ASSERT(!tensor_sizes.empty());
+ AT_ASSERT(!tensor_sizes.empty());
Value* output_size = tensor_sizes.size() == 1
? tensor_sizes[0]
: broadcastSizes(tensor_sizes);
auto inputs = fusion_group->inputs();
auto sinputs = subgraph->inputs();
- JIT_ASSERT(inputs.size() == sinputs.size());
+ AT_ASSERT(inputs.size() == sinputs.size());
for (size_t i = 0; i < inputs.size(); ++i) {
shape_of[sinputs[i]] = graph->insert(aten::size, {inputs[i]});
}
// beginning of the kernel.
auto outputs = fusion_group->outputs();
auto soutputs = subgraph->outputs();
- JIT_ASSERT(outputs.size() == soutputs.size());
+ AT_ASSERT(outputs.size() == soutputs.size());
for (size_t i = 0; i < outputs.size(); ++i) {
if (usedOnlyInSize(outputs[i]))
continue;
});
auto shapes =
fmap(tensor_inputs, [&](Value* v) { return shape_of.at(v); });
- JIT_ASSERT(!shapes.empty());
+ AT_ASSERT(!shapes.empty());
shape_of.emplace(
n->output(), shapes.size() == 1 ? shapes[0] : broadcastSizes(shapes));
}
if (usedOnlyInSize(output) && shape_of.count(soutput) > 0) {
auto uses = output->uses();
for (Use u : uses) {
- JIT_ASSERT(u.user->matches("aten::size(Tensor self) -> int[]"));
+ AT_ASSERT(u.user->matches("aten::size(Tensor self) -> int[]"));
u.user->output()->replaceAllUsesWith(shape_of.at(soutput));
u.user->destroy();
}
}
Node* createFusedConcat(Node* node) {
- JIT_ASSERT(node->kind() == aten::cat);
+ AT_ASSERT(node->kind() == aten::cat);
Graph* graph = node->owningGraph();
Node* list_construct = node->namedInput(attr::tensors)->node();
}
any_fused = true;
auto maybe_group = tryFuse(fused_cat, input);
- JIT_ASSERT(maybe_group && maybe_group == fused_cat);
+ AT_ASSERT(maybe_group && maybe_group == fused_cat);
// We could have destroyed multiple inputs when performing this fusion,
// so we have to recompute the list and iterate over it again.
sorted_inputs = sortReverseTopological(fused_cat->inputs());
#include <torch/csrc/jit/passes/loop_unrolling.h>
-#include <torch/csrc/jit/assertions.h>
-#include <torch/csrc/jit/interned_strings.h>
+#include <c10/util/Exception.h>
+#include <ATen/core/interned_strings.h>
#include <torch/csrc/jit/symbolic_variable.h>
#include <torch/csrc/jit/constants.h>
// Update loop-carried values
// NB: note that we don't need to worry about the loop counter, because
// we've replaced it with a loop-carried variable
- JIT_ASSERT(body->inputs().size() == body->outputs().size());
+ AT_ASSERT(body->inputs().size() == body->outputs().size());
for (size_t i = 1; i < body->inputs().size(); ++i) {
value_map[body->inputs()[i]] = get_value(body->outputs()[i]);
}
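Concretely, if the body computes x_out = f(x_in), cloning the body once and routing it through value_map yields x1 = f(x0); x2 = f(x1); the map is what substitutes each copy's inputs with the previous copy's outputs.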
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/passes/dead_code_elimination.h>
#include <torch/csrc/jit/passes/lower_tuples.h>
#include <torch/csrc/utils/functional.h>
for (size_t i = 0; i < n->inputs().size();) {
auto input = n->inputs()[i];
if (TupleTypePtr tt = input->type()->cast<TupleType>()) {
- JIT_ASSERTM(
+ AT_CHECK(
white_list.count(n->kind()) > 0,
"tuple appears in op that does not forward tuples");
- JIT_ASSERTM(
+ AT_CHECK(
input->node()->kind() == prim::TupleConstruct,
"tuple use not matched to tuple construct");
for (size_t j = 0; j < tt->elements().size(); ++j) {
// tup = (t0, t1)
// is placed at the current insertion point
if (TupleTypePtr tt = output->type()->cast<TupleType>()) {
- JIT_ASSERTM(
+ AT_CHECK(
white_list.count(n->kind()) > 0,
"tuple appears in op that does not forward tuples");
for (size_t j = 0; j < tt->elements().size(); j++) {
static void EnsureNoTuples(ArrayRef<Value*> values) {
for (Value* v : values) {
- JIT_ASSERTM(
+ AT_CHECK(
v->type()->kind() != TypeKind::TupleType, "Couldn't lower all tuples.");
}
}
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/symbolic.h>
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/passes/dead_code_elimination.h>
#include <torch/csrc/jit/passes/onnx.h>
#include <torch/csrc/utils/functional.h>
// Returns a node that n maps to in the new graph
auto envFn = [&env](Value* n) -> Value* {
auto it = env.find(n);
- JIT_ASSERTM(it != env.end(), "Dangling node reference");
- JIT_ASSERTM(it->second, "Unused node was subsequently used");
+ AT_CHECK(it != env.end(), "Dangling node reference");
+ AT_CHECK(it->second, "Unused node was subsequently used");
return it->second;
};
for (auto arg_type : op->cconv) {
py::object obj;
if (arg_type == 'c') {
- JIT_ASSERTM(
+ AT_CHECK(
scalar_it != op->scalar_args.end(),
"expected too many scalar args");
obj = py::reinterpret_borrow<py::object>(
py::handle((scalar_it++)->get()));
} else if (arg_type == 'd') {
- JIT_ASSERTM(node_it != inputs.end(), "expected too many inputs");
+ AT_CHECK(node_it != inputs.end(), "expected too many inputs");
obj = py::cast(envFn(*node_it++));
} else {
throw std::runtime_error("unexpected calling convention");
void FixupONNXLoops(Block* block) {
for (auto* node : block->nodes()) {
- if (node->kind() == torch::jit::onnx::Loop) {
- JIT_ASSERT(node->blocks().size() == 1);
+ if (node->kind() == ::c10::onnx::Loop) {
+ AT_ASSERT(node->blocks().size() == 1);
auto* sub_block = node->blocks()[0];
sub_block->insertInput(1, "cond");
}
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/passes/onnx/peephole.h>
#include <c10/util/Optional.h>
namespace torch {
namespace jit {
+namespace onnx {
+using namespace ::c10::onnx;
+}
+
bool isRNN(const Node* node) {
auto k = node->kind();
return k == onnx::RNN || k == onnx::LSTM || k == onnx::GRU;
std::vector<int64_t> composeTransposes(
const std::vector<int64_t>& t1,
const std::vector<int64_t>& t2) {
- JIT_ASSERT(t1.size() == t2.size());
+ AT_ASSERT(t1.size() == t2.size());
std::vector<int64_t> ret;
ret.reserve(t1.size());
for (const auto& i : t2) {
- JIT_ASSERT(i < int64_t(t1.size()));
+ AT_ASSERT(i < int64_t(t1.size()));
ret.push_back(t1[i]);
}
return ret;
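composeTransposes returns ret[k] = t1[t2[k]], so permuting by t1 and then by t2 equals one permute by ret (assuming permute semantics where output dim k reads input dim perm[k]). A worked check:

    // x.permute(t1).permute(t2) == x.permute(composeTransposes(t1, t2))
    // t1 = {1, 0}, t2 = {1, 0} => ret = {t1[1], t1[0]} = {0, 1}: the swaps cancel.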
auto& broadcast_positions = getBroadcastPositions(n);
if (!broadcast_positions.empty()) {
- JIT_ASSERT(!n->hasAttribute(attr::axis));
+ AT_ASSERT(!n->hasAttribute(attr::axis));
}
for (size_t position : broadcast_positions) {
static void replaceInputWithList(Node* node, size_t i, ArrayRef<Value*> to) {
node->removeInput(i);
for (auto* to_val : to) {
- JIT_ASSERT(to_val->owningGraph() == node->owningGraph());
+ AT_ASSERT(to_val->owningGraph() == node->owningGraph());
node->insertInput(i++, to_val);
}
}
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/attributes.h>
#include <torch/csrc/jit/export.h>
#include <torch/csrc/jit/ir.h>
}
out << name_;
} else {
- JIT_ASSERT(prefix_);
+ AT_ASSERT(prefix_);
out << "getattr(";
prefix_->emit(out);
out << ", ";
// block_point's output.
Node* scanValue(Node* block_point, Value* v) {
Node* n = v->node();
- JIT_ASSERT(isConstantLike(n) || output_inline_.count(n) == 0);
+ AT_ASSERT(isConstantLike(n) || output_inline_.count(n) == 0);
if (n == block_point &&
canInline(v)) { // the node must be at the expected point of the typical
return i;
}
}
- JIT_ASSERT(t.is_variable());
+ AT_ASSERT(t.is_variable());
tensor_table_.emplace_back(std::move(t));
return tensor_table_.size() - 1;
}
}
} else {
// vararg functions like format can have extra arguments
- JIT_ASSERT(schema.is_vararg());
+ AT_ASSERT(schema.is_vararg());
}
stmt << v;
}
}
// have we used all of the provided defaults?
- JIT_ASSERT(defaults_offset == defaults.end());
+ AT_ASSERT(defaults_offset == defaults.end());
out << ") -> " << resultType(graph)->python_str() << ":\n";
{
// to be correctly printed for export (a process that happens before
// optimization passes run)
const static std::unordered_set<Symbol> unneeded = {
- onnx::Reshape, // only used in onnx
- onnx::Shape, // only used in onnx
+ c10::onnx::Reshape, // only used in onnx
+ c10::onnx::Shape, // only used in onnx
prim::AnyDefined, // temporarily inserted by autograd
prim::AutogradAdd, // temporarily inserted by autograd
prim::ConstantChunk, // optimization pass adds it
#include <torch/csrc/jit/argument_spec.h>
#include <torch/csrc/jit/ir.h>
#include <torch/csrc/jit/operator.h>
-#include <torch/csrc/jit/type.h>
+#include <ATen/core/jit_type.h>
#include <vector>
void setRequiresGrad(
at::ArrayRef<Value*> outputs,
const std::vector<bool>& values) {
- JIT_ASSERT(outputs.size() == values.size());
+ AT_ASSERT(outputs.size() == values.size());
for (size_t i = 0; i < values.size(); ++i) {
setRequiresGrad(outputs[i], values[i]);
}
}
std::vector<bool> bitwiseOr(std::vector<bool> a, const std::vector<bool>& b) {
- JIT_ASSERT(a.size() == b.size());
+ AT_ASSERT(a.size() == b.size());
for (size_t i = 0; i < a.size(); ++i) {
a[i] = a[i] || b[i];
}
#include <torch/csrc/jit/passes/shape_analysis.h>
#include <torch/csrc/jit/argument_spec.h>
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/constants.h>
#include <torch/csrc/jit/ir.h>
#include <torch/csrc/jit/operator.h>
namespace torch {
namespace jit {
+namespace prim {
+using namespace ::c10::prim;
+}
+
struct propagation_error : std::exception {};
#define SHAPE_ASSERT(cond) \
ArrayRef<Value*> lhs,
ArrayRef<Value*> rhs,
ArrayRef<Value*> outputs) {
- JIT_ASSERT(lhs.size() == rhs.size() && rhs.size() == outputs.size());
+ AT_ASSERT(lhs.size() == rhs.size() && rhs.size() == outputs.size());
bool changed = false;
for (size_t i = 0; i < lhs.size(); ++i) {
auto old_output_type = outputs[i]->type();
auto new_type = unifyTypes(lhs[i]->type(), rhs[i]->type());
- JIT_ASSERT(new_type);
+ AT_ASSERT(new_type);
outputs[i]->setType(*new_type);
if (*old_output_type != *outputs[i]->type())
changed = true;
// preceded by schema checking.
op(stack);
- JIT_ASSERT(stack.size() == node->outputs().size());
+ AT_ASSERT(stack.size() == node->outputs().size());
for (size_t i = 0; i < stack.size(); ++i) {
// some ops may have mixed tensor/primitive outputs
// for primitives, we don't need to change the type because it is already
}
case prim::TupleUnpack: {
auto tuple_type = node->input()->type()->cast<TupleType>();
- JIT_ASSERT(
+ AT_ASSERT(
tuple_type &&
tuple_type->elements().size() == node->outputs().size());
auto elems = tuple_type->elements();
}
static c10::optional<size_t> determineListSize(Value* list) {
- JIT_ASSERT(list->type()->cast<ListType>());
+ AT_ASSERT(list->type()->cast<ListType>());
if (auto shape = constant_as<std::vector<int64_t>>(list)) {
return shape->size();
}
if (tensor_types.size() == 1) {
return tensor_types[0];
}
- JIT_ASSERT(!tensor_types.empty());
+ AT_ASSERT(!tensor_types.empty());
auto any_type = tensor_types[arg_for_type];
auto max_dims = any_type->dim();
for (auto& type : tensor_types) {
return false;
} else {
auto outputs = node->outputs();
- JIT_ASSERT(types.size() == outputs.size());
+ AT_ASSERT(types.size() == outputs.size());
for (size_t i = 0; i < types.size(); ++i) {
- JIT_ASSERT(outputs[i]->type()->isSubtypeOf(DynamicType::get()));
+ AT_ASSERT(outputs[i]->type()->isSubtypeOf(DynamicType::get()));
outputs[i]->setType(types[i]);
}
return true;
input_type->withSizesStrides(sizes, strides));
}
return true;
- } else if (node->kind() == onnx::Shape) {
+ } else if (node->kind() == ::c10::onnx::Shape) {
SHAPE_ASSERT(node->inputs().size() == 1 && node->outputs().size() == 1);
std::vector<int64_t> dim_vec = {
(int64_t)tensor_types.at(0)->sizes().size()};
node->output()->setType(
CompleteTensorType::create(at::kLong, at::kCPU, dims));
return true;
- } else if (node->kind() == onnx::Reshape) {
+ } else if (node->kind() == ::c10::onnx::Reshape) {
setUnshapedType(node);
return true;
}
// where we do not know if a value is defined since at the top level
// a gradient graph is composed of Linear nodes and AutogradAdds
// and LinearNodes only appear in these graphs
- JIT_ASSERT(state[input] != State::Unknown);
+ AT_ASSERT(state[input] != State::Unknown);
}
// hoist the nodes in the GradOf body to be before the linear block
for (auto it = body->nodes().begin(); it != body->nodes().end();) {
}
const auto& lhs = inputs.at(i);
const auto& rhs = inputs.at(j);
- JIT_ASSERT(!lhs.isAliasOf(rhs));
+ AT_ASSERT(!lhs.isAliasOf(rhs));
}
}
}
if (output.iValue.isAliasOf(input.iValue)) {
const auto inputSet = input.aliasInfo;
const auto outputSet = output.aliasInfo;
- JIT_ASSERT(inputSet && outputSet);
- JIT_ASSERT(inputSet->isSubsetOf(*outputSet));
+ AT_ASSERT(inputSet && outputSet);
+ AT_ASSERT(inputSet->isSubsetOf(*outputSet));
}
}
}
void checkWrites(
const std::vector<AliasAndIValue>& inputs,
const std::vector<IValue>& deepCopiedInputs) {
- JIT_ASSERT(inputs.size() == deepCopiedInputs.size());
+ AT_ASSERT(inputs.size() == deepCopiedInputs.size());
for (size_t i = 0; i < inputs.size(); i++) {
const auto& input = inputs[i];
const auto& deepCopiedInput = deepCopiedInputs[i];
if (!input.aliasInfo || !input.aliasInfo->isWrite()) {
- JIT_ASSERT(deepEquals(input.iValue, deepCopiedInput));
+ AT_ASSERT(deepEquals(input.iValue, deepCopiedInput));
}
}
}
return node;
}
}
- JIT_ASSERT(false);
+ AT_ASSERT(false);
}
// Handle a few special cases where we need to propagate constants
if (inputValue) {
push(stack, *inputValue);
} else {
- JIT_ASSERT(input->type()->kind() == TypeKind::OptionalType);
+ AT_ASSERT(input->type()->kind() == TypeKind::OptionalType);
push(stack, IValue());
}
}
}
void unmergeSubgraph(Node* subgraphNode) {
- JIT_ASSERT(subgraphNode->kind() == prim::DifferentiableGraph);
+ AT_ASSERT(subgraphNode->kind() == prim::DifferentiableGraph);
// Inline the graph, replace uses of node outputs and destroy the node
const auto subgraphOutputs = inlineGraph(
getSubgraph(subgraphNode), subgraphNode->inputs(), subgraphNode);
- JIT_ASSERT(subgraphOutputs.size() >= subgraphNode->outputs().size());
+ AT_ASSERT(subgraphOutputs.size() >= subgraphNode->outputs().size());
for (size_t i = 0; i < subgraphNode->outputs().size(); ++i) {
subgraphNode->outputs()[i]->replaceAllUsesWith(subgraphOutputs[i]);
}
}
void mergeNodeIntoSubgraph(Node* toMerge, Node* subgraphNode) {
- JIT_ASSERT(hasSubgraph(subgraphNode));
+ AT_ASSERT(hasSubgraph(subgraphNode));
if (hasSubgraph(toMerge)) {
return mergeSubgraph(subgraphNode, toMerge);
}
// Map from values in the surrounding graph to inputs in the subgraph
std::unordered_map<Value*, Value*> inputsMap;
- JIT_ASSERT(subgraphNode->inputs().size() == subgraph->inputs().size());
+ AT_ASSERT(subgraphNode->inputs().size() == subgraph->inputs().size());
size_t idx = 0;
for (auto input : subgraphNode->inputs()) {
inputsMap[input] = subgraph->inputs()[idx];
// Initialize a map of inner graph values to outer graph values
std::unordered_map<const Value*, Value*> innerToOuter;
const auto innerInputs = subgraph->inputs();
- JIT_ASSERT(outerInputs.size() == innerInputs.size());
+ AT_ASSERT(outerInputs.size() == innerInputs.size());
for (size_t i = 0; i < innerInputs.size(); ++i) {
innerToOuter[innerInputs[i]] = outerInputs[i];
}
#include <torch/csrc/DynamicTypes.h>
#include <torch/csrc/THP.h>
#include <torch/csrc/autograd/variable.h>
-#include <torch/csrc/jit/interned_strings.h>
-#include <torch/csrc/jit/ivalue.h>
+#include <ATen/core/interned_strings.h>
+#include <ATen/core/ivalue.h>
#include <torch/csrc/jit/pybind_utils.h>
#include <torch/csrc/jit/tracer.h>
#include <torch/csrc/utils/pybind.h>
#pragma once
#include <torch/csrc/Device.h>
-#include <torch/csrc/jit/function_schema.h>
-#include <torch/csrc/jit/ivalue.h>
+#include <ATen/core/ivalue.h>
#include <torch/csrc/jit/operator.h>
#include <torch/csrc/jit/script/module.h>
#include <torch/csrc/jit/stack.h>
-#include <torch/csrc/jit/type.h>
+#include <ATen/core/jit_type.h>
#include <torch/csrc/utils/six.h>
#include <torch/csrc/utils/auto_gil.h>
#include <torch/csrc/utils/pybind.h>
+#include <ATen/core/function_schema.h>
#include <c10/util/Exception.h>
#include <algorithm>
namespace jit {
namespace detail {
+using ::c10::Argument;
+using ::c10::FunctionSchema;
+
// error reporting: when reporting user-caused errors, these functions should
// not use AT_ERROR macros, since these macros add stack trace information
// that is confusing to display to the end user since it always reports
Operation createPythonOperation(const Node* op_) {
AutoGIL gil;
const PythonOp* op = static_cast<const PythonOp*>(op_);
- // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
const py::function func = py::reinterpret_borrow<const py::function>(
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
py::handle(const_cast<PythonOp*>(op)->pyobj.get()));
size_t num_inputs = 0;
num_inputs++;
}
- JIT_ASSERT(op->outputs().size() == 1);
+ AT_ASSERT(op->outputs().size() == 1);
return [=](Stack& stack) {
AutoGIL gil;
size_t next_tensor = 0;
for (auto arg_type : op->cconv) {
if (arg_type == 'c') {
- // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
py_inputs[i] = py::reinterpret_borrow<const py::object>(
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
const_cast<PythonOp*>(op)->scalar_args[next_scalar++].get());
} else if (arg_type == 'd') {
py_inputs[i] =
});
m.def("_tracer_set_get_unique_name_fn", [](py::function func) {
const auto& tracing_state = getTracingState();
- JIT_ASSERT(tracing_state);
+ AT_ASSERT(tracing_state);
tracing_state->lookup_var_name_fn =
[func](const Variable& var) -> std::string {
AutoGIL ag;
});
m.def("_tracer_set_force_outplace", [](bool force_outplace) {
const auto& tracing_state = getTracingState();
- JIT_ASSERT(tracing_state);
+ AT_ASSERT(tracing_state);
tracing_state->force_outplace = force_outplace;
});
}
(shape[dim] + split_size - 1) / split_size, 1);
last_shape[dim] =
split_size - (split_size * num_splits - shape[dim]);
- JIT_ASSERT(last_shape[dim] >= 0);
+ AT_ASSERT(last_shape[dim] >= 0);
}
push(stack, std::move(regular_shape));
push(stack, std::move(last_shape));
};
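A worked example of the split arithmetic above: shape[dim] = 10 with split_size = 4 gives num_splits = (10 + 4 - 1) / 4 = 3, so the regular chunks have size 4 and last_shape[dim] = 4 - (4 * 3 - 10) = 2, which checks out: 4 + 4 + 2 = 10.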
}),
Operator(
- onnx::Reshape,
+ c10::onnx::Reshape,
[](const Node* node) {
return [=](Stack& stack) {
at::Tensor input, shape;
pop(stack, input, shape);
shape = shape.contiguous();
- JIT_ASSERT(shape.ndimension() == 1);
+ AT_ASSERT(shape.ndimension() == 1);
at::IntList shape_list(shape.data<int64_t>(), shape.size(0));
push(stack, input.reshape(shape_list));
return 0;
};
}),
Operator(
- onnx::Shape,
+ c10::onnx::Shape,
[](const Node* node) {
return [=](Stack& stack) {
auto t = pop(stack).toTensor();
int64_t num_results = result.size();
if (num_results != chunks) {
if (num_results > chunks) {
- JIT_ASSERTM(
+ AT_CHECK(
num_results == chunks,
"Expected chunk to return ",
chunks,
[](const Node* node) {
Code code(node->g(attr::Subgraph));
int n_inputs = node->inputs().size();
- JIT_ASSERT(node->blocks().size() == 0);
- JIT_ASSERT(node->hasAttribute(attr::Subgraph));
+ AT_ASSERT(node->blocks().size() == 0);
+ AT_ASSERT(node->hasAttribute(attr::Subgraph));
return [=](Stack& stack) {
// Move inputs to a separate stack
InterpreterState forked_interprester(code);
data += strides[dim] * elementSize;
}
} else {
- JIT_ASSERT(obj.isIntList() || obj.isDoubleList() || obj.isBoolList());
+ AT_ASSERT(obj.isIntList() || obj.isDoubleList() || obj.isBoolList());
if (obj.isIntList()) {
storeLastDimension<int64_t>(data, sizes, strides, dim, elementSize, obj.toIntListRef());
} else if (obj.isDoubleList()){
auto defaults = peek(stack, 1, 2).toIntListRef();
drop(stack, 2);
- JIT_ASSERT(defaults.size() > list.size());
+ AT_ASSERT(defaults.size() > list.size());
// TODO: allow list of optionals to be filled in with defaults
// i.e. list_with_default([1, 2, None], [1, 2, 3]) -> [1, 2, 3]
#include <torch/csrc/jit/ir.h>
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/operator.h>
#include <algorithm>
#pragma once
#include <c10/macros/Macros.h>
#include <torch/csrc/WindowsTorchApiMacro.h>
-#include <torch/csrc/jit/assertions.h>
-#include <torch/csrc/jit/interned_strings.h>
+#include <c10/util/Exception.h>
+#include <ATen/core/interned_strings.h>
#include <memory>
// will always be valid as long as Graph is alive.
struct Scope;
using ScopePtr = c10::intrusive_ptr<Scope>;
+using c10::Symbol;
struct TORCH_API Scope : public c10::intrusive_ptr_target {
private:
loadBuiltinFunctions();
state = INITIALIZED;
}
- JIT_ASSERT(state == INITIALIZED);
+ AT_ASSERT(state == INITIALIZED);
auto it = builtins_by_name.find(name);
if (it == builtins_by_name.end())
return empty;
#include <torch/csrc/jit/script/compiler.h>
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/hooks_for_testing.h>
#include <torch/csrc/jit/interpreter.h>
#include <torch/csrc/jit/ir.h>
// inputs: loop_counter, lcd0, lcd1, ...
// outputs: loop_condition, lcd0, lcd1, ...
// captured_inputs: lcd0, lcd1, ...
- JIT_ASSERT(b->inputs().size() == b->outputs().size());
- JIT_ASSERT(b->inputs().size() == captured_inputs.size() + 1);
+ AT_ASSERT(b->inputs().size() == b->outputs().size());
+ AT_ASSERT(b->inputs().size() == captured_inputs.size() + 1);
for (size_t i = b->inputs().size() - 1; i > 0; i--) {
// nothing changed along this loop
if (b->inputs()[i] == b->outputs()[i]) {
graph(method.graph()),
resolver(std::move(resolver_)),
environment_stack(nullptr) {
- JIT_ASSERT(resolver);
+ AT_ASSERT(resolver);
pushFrame(graph->block(), /*starts_def=*/true);
// Type annotations exclude explicitly typing the "self" parameter, so in
<< expected_annotation_size << ")!";
}
if (self) {
- JIT_ASSERT(it != end);
+ AT_ASSERT(it != end);
environment_stack->setSugaredVar(def.range(), (*it).ident().name(), self);
++it;
}
const FunctionSchema& schema,
Block* block) {
// rewrites ensure there is always a return statement in program
- JIT_ASSERT(def_stack_.back().merged_return_type_);
+ AT_ASSERT(def_stack_.back().merged_return_type_);
// outputs
Value* result = environment_stack->getVar("$return", range);
block->registerOutput(result);
<< result->type()->python_str();
}
}
- JIT_ASSERT(result_type);
+ AT_ASSERT(result_type);
def_stack_.back().merged_return_type_ = result_type;
environment_stack->setVar(stmt.range(), "$return", result);
}
// list.set_item(get_item(idx).add_(value))
// similar to how Python handles things.
const auto listType = sliceable->type()->cast<ListType>();
- JIT_ASSERT(listType != nullptr);
+ AT_ASSERT(listType != nullptr);
bool isTensorList =
listType->getElementType()->isSubtypeOf(DynamicType::get());
Stack stack;
stack.push_back(*maybe_constant_input);
op(stack);
- JIT_ASSERT(stack.size() == 1);
+ AT_ASSERT(stack.size() == 1);
return graph->insertConstant(stack[0], tree->range());
}
// XXX: If list slicing becomes more complicated or stops using
// aten::slice, we should separate it from this function.
if (dim) {
- JIT_ASSERT(input->type()->isSubtypeOf(DynamicType::get()));
+ AT_ASSERT(input->type()->isSubtypeOf(DynamicType::get()));
args.emplace_back(loc, "dim", graph->insertConstant(dim.value(), loc));
} else {
- JIT_ASSERT(!input->type()->isSubtypeOf(DynamicType::get()));
+ AT_ASSERT(!input->type()->isSubtypeOf(DynamicType::get()));
}
args.emplace_back(loc, "begin", emitExpr(Expr(slice.startOr(0))));
const SourceRange& loc,
Value* sliceable,
const List<Expr>& subscript_exprs) {
- JIT_ASSERT(subscript_exprs.size() == 1);
- JIT_ASSERT(subscript_exprs[0].kind() == TK_SLICE_EXPR);
+ AT_ASSERT(subscript_exprs.size() == 1);
+ AT_ASSERT(subscript_exprs[0].kind() == TK_SLICE_EXPR);
auto slice_exp = SliceExpr(subscript_exprs[0]);
c10::optional<int64_t> maybe_dim;
if (sliceable->type()->isSubtypeOf(DynamicType::get())) {
const SourceRange& loc,
Value* gatherable,
const List<Expr>& subscript_exprs) {
- JIT_ASSERT(subscript_exprs.size() == 1);
+ AT_ASSERT(subscript_exprs.size() == 1);
if (gatherable->type()->kind() == TypeKind::ListType) {
// if it's a list, emit a regular index selection op
const std::vector<Def>& definitions,
const std::vector<Resolver>& resolvers,
const SugaredValuePtr& self) {
- JIT_ASSERT(definitions.size() == resolvers.size());
+ AT_ASSERT(definitions.size() == resolvers.size());
auto resolver_it = resolvers.begin();
std::vector<Method*> methods;
std::unordered_map<std::string, Method*> function_table;
for (const Def& def : definitions) {
const std::string& name = def.name().name();
auto resolver = *resolver_it++;
- JIT_ASSERT(resolver);
+ AT_ASSERT(resolver);
if (!self) {
// if self is defined, then these are methods and do not go into the
// global namespace otherwise, they get defined together so we add them to
};
}
auto creator = [def, resolver, self](Method& method) {
- JIT_ASSERT(resolver);
+ AT_ASSERT(resolver);
to_ir(def, resolver, self, method);
};
Method& method = m->create_method(name, creator);
#include <torch/csrc/jit/script/module.h>
#include <torch/csrc/jit/constants.h>
-#include <torch/csrc/jit/function_schema.h>
#include <torch/csrc/jit/hooks_for_testing.h>
#include <torch/csrc/jit/import_method.h>
#include <torch/csrc/jit/passes/python_print.h>
#include <torch/csrc/api/include/torch/ordered_dict.h>
#include <ATen/ATen.h>
+#include <ATen/core/function_schema.h>
#include <pybind11/functional.h>
#include <cstddef>
namespace jit {
namespace script {
+using ::c10::Argument;
+using ::c10::FunctionSchema;
+
using ResolutionCallback = std::function<py::function(std::string)>;
using FunctionDefaults = std::unordered_map<std::string, py::object>;
for (auto& i : matched_schema->inputs)
new_node->addInput(i);
- JIT_ASSERT(matched_schema->return_types.size() == 1);
+ AT_ASSERT(matched_schema->return_types.size() == 1);
Value* output =
new_node->addOutput()->setType(matched_schema->return_types.at(0));
return std::make_shared<SimpleValue>(output);
#pragma once
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/source_range.h>
#include <torch/csrc/utils/memory.h>
#include <algorithm>
TokenTrie() : kind(0) {}
void insert(const char* str, int tok) {
if (*str == '\0') {
- JIT_ASSERT(kind == 0);
+ AT_ASSERT(kind == 0);
kind = tok;
return;
}
int kind;
size_t start;
size_t length;
- JIT_ASSERT(file);
+ AT_ASSERT(file);
if (!shared.match(
*file,
pos,
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/export.h>
#include <torch/csrc/jit/operator.h>
#include <torch/csrc/jit/script/compiler.h>
Method& callee,
ArrayRef<NamedValue> args,
ArrayRef<NamedValue> kwargs) {
- JIT_ASSERT(!executor);
+ AT_ASSERT(!executor);
std::stringstream failure_messages;
if (auto result = try_emit_call_to(
*graph(),
#pragma once
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/jit/argument_spec.h>
-#include <torch/csrc/jit/assertions.h>
-#include <torch/csrc/jit/function_schema.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/graph_executor.h>
#include <torch/csrc/jit/ir.h>
#include <torch/csrc/jit/named_value.h>
#include <torch/csrc/api/include/torch/ordered_dict.h>
#include <torch/csrc/utils/memory.h>
+#include <ATen/core/function_schema.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/Optional.h>
namespace jit {
namespace script {
+using ::c10::Argument;
+using ::c10::FunctionSchema;
+
// A method in a module, e.g. f in:
//
// class M(ScriptModule):
optimize(optimize),
member_inputs(std::move(initial_members)),
method_creator(std::move(method_creator)) {
- JIT_ASSERT(graph_->inputs().size() >= member_inputs.size());
+ AT_ASSERT(graph_->inputs().size() >= member_inputs.size());
int i = graph_->inputs().size() - member_inputs.size();
for (at::Tensor* member : member_inputs) {
member_input_index[member] = i++;
ArgumentSpec(with_grad, fmap<IValue>(inputs), inputs.size()));
PropagateInputShapes(retval);
}
- JIT_ASSERT(retval->inputs().size() == inputs.size());
+ AT_ASSERT(retval->inputs().size() == inputs.size());
for (size_t i = 0; i < retval->inputs().size(); ++i) {
auto scalar_type = inputs[i].type().scalarType();
auto sizes = inputs[i].sizes();
at::ArrayRef<Value*> output_values = retval->outputs();
// patch this to still work if we are returning a tuple of multiple values
if (output_values.at(0)->type()->kind() == TupleType::Kind) {
- JIT_ASSERT(output_values.at(0)->node()->kind() == prim::TupleConstruct);
+ AT_ASSERT(output_values.at(0)->node()->kind() == prim::TupleConstruct);
output_values = output_values.at(0)->node()->inputs();
}
- JIT_ASSERT(output_values.size() == outputs.size());
+ AT_ASSERT(output_values.size() == outputs.size());
for (size_t i = 0; i < retval->outputs().size(); ++i) {
auto scalar_type = outputs[i].type().scalarType();
auto sizes = outputs[i].sizes();
}
std::string pretty_print_schema() const {
- JIT_ASSERT(schema);
+ AT_ASSERT(schema);
std::stringstream ss;
ss << *schema;
return ss.str();
const std::string& name,
std::shared_ptr<Graph> graph,
std::vector<at::Tensor*> member_inputs) {
- JIT_ASSERT(graph);
+ AT_ASSERT(graph);
std::unique_ptr<Method> method(new Method(
this,
name,
#pragma once
#include <torch/csrc/WindowsTorchApiMacro.h>
-#include <torch/csrc/jit/function_schema.h>
#include <torch/csrc/jit/named_value.h>
-#include <torch/csrc/jit/type.h>
+#include <ATen/core/jit_type.h>
+
+#include <ATen/core/function_schema.h>
namespace torch {
namespace jit {
};
TORCH_API c10::optional<MatchedSchema> tryMatchSchema(
- const FunctionSchema& schema,
+ const ::c10::FunctionSchema& schema,
const SourceRange& loc,
Graph& graph,
c10::optional<NamedValue> self,
<< "Broadcastable lists only supported for int or float";
auto elem_ptr = ident_to_type_lut().find(value_name);
- JIT_ASSERT(elem_ptr != ident_to_type_lut().end());
+ AT_ASSERT(elem_ptr != ident_to_type_lut().end());
TypePtr list_ptr = ListType::create(elem_ptr->second);
const char* len_c = len.c_str();
#pragma once
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/source_location.h>
#include <algorithm>
--begin_line;
while (end_line < str.size() && str[end_line] != '\n')
++end_line;
- JIT_ASSERT(begin_line == 0 || str[begin_line - 1] == '\n');
- JIT_ASSERT(end_line == str.size() || str[end_line] == '\n');
+ AT_ASSERT(begin_line == 0 || str[begin_line - 1] == '\n');
+ AT_ASSERT(end_line == str.size() || str[end_line] == '\n');
size_t begin_highlight = begin_line; // beginning of context, CONTEXT lines
// before the highlight line
if (i >= CONTEXT)
break;
}
- JIT_ASSERT(begin_highlight == 0 || str[begin_highlight - 1] == '\n');
+ AT_ASSERT(begin_highlight == 0 || str[begin_highlight - 1] == '\n');
size_t end_highlight =
end_line; // end of context, CONTEXT lines after the highlight line
if (i >= CONTEXT)
break;
}
- JIT_ASSERT(end_highlight == str.size() || str[end_highlight] == '\n');
+ AT_ASSERT(end_highlight == str.size() || str[end_highlight] == '\n');
out << str.substr(begin_highlight, end_line - begin_highlight) << "\n";
out << std::string(start() - begin_line, ' ');
#pragma once
#include <ATen/ATen.h>
-#include <torch/csrc/jit/ivalue.h>
+#include <ATen/core/ivalue.h>
namespace torch {
namespace jit {
+using c10::IValue;
using Stack = std::vector<IValue>;
using Operation = std::function<int(Stack&)>;
}
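An Operation is an int-returning callable over the shared Stack; ordinary ops return 0 (nonzero is reserved for interpreter jumps). A hedged sketch of a trivial op, assuming the pop/push helpers declared in this header:

    Operation addInts = [](Stack& stack) {
      int64_t b = pop(stack).toInt();
      int64_t a = pop(stack).toInt();
      push(stack, a + b); // pushes an IValue holding the sum
      return 0;
    };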
template <typename... Types>
static inline void push(Stack& stack, Types&&... args) {
std::initializer_list<int>{(stack.emplace_back(std::forward<Types>(args)), 0)...};
}
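The braced initializer_list is the pre-C++17 idiom for expanding a parameter pack with guaranteed left-to-right evaluation; each list element is the comma expression (emplace_back(...), 0), and the zeros are discarded. A self-contained sketch of the same trick:

    #include <initializer_list>
    #include <utility>
    #include <vector>

    template <typename... Ts>
    void push_all(std::vector<int>& v, Ts&&... args) {
      // Side effects run in argument order; the int results are thrown away.
      std::initializer_list<int>{(v.emplace_back(std::forward<Ts>(args)), 0)...};
    }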
// The packer here is carefully written not to make any unnecessary
return create(aten::cat, {input_list, dim})[0];
}
static SymbolicVariable cat(ArrayRef<SymbolicVariable> inputs, int dim) {
- JIT_ASSERT(inputs.size() > 0);
+ AT_ASSERT(inputs.size() > 0);
return SymbolicVariable::cat(inputs, inputs[0].insertConstant(dim));
}
static SymbolicVariable stack(ArrayRef<SymbolicVariable> inputs, Value* dim) {
return create(aten::stack, {input_list, dim})[0];
}
static SymbolicVariable stack(ArrayRef<SymbolicVariable> inputs, int dim) {
- JIT_ASSERT(inputs.size() > 0);
+ AT_ASSERT(inputs.size() > 0);
return SymbolicVariable::stack(inputs, inputs[0].insertConstant(dim));
}
static std::vector<SymbolicVariable> broadcast_tensors(
ArrayRef<SymbolicVariable> inputs) {
- JIT_ASSERT(inputs.size() > 0);
+ AT_ASSERT(inputs.size() > 0);
Graph* g = inputs[0].value()->owningGraph();
auto value_inputs =
fmap(inputs, [](const SymbolicVariable& v) { return v.value(); });
#include <torch/csrc/autograd/engine.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/variable.h>
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/passes/dead_code_elimination.h>
#include <torch/csrc/jit/passes/remove_expands.h>
void setValueTrace(const IValue& v, Value* value) {
if (v.isTensor()) {
auto var = v.toTensor();
- JIT_ASSERT(var.defined());
+ AT_ASSERT(var.defined());
getTracingState()->value_map[var] = value;
} else if (v.isTensorList()) {
auto& outputs = v.toTensorList()->elements();
if (!isTracing())
return;
auto& list_trace = stash.intlists.emplace(arg_name, size).first->second;
- JIT_ASSERT(size == list_trace.size());
- JIT_ASSERT(idx < list_trace.size());
- JIT_ASSERT(list_trace[idx] == nullptr);
+ AT_ASSERT(size == list_trace.size());
+ AT_ASSERT(idx < list_trace.size());
+ AT_ASSERT(list_trace[idx] == nullptr);
Value* ten = getValueTrace(var);
auto& g = *ten->owningGraph();
#include <torch/csrc/WindowsTorchApiMacro.h>
#include <torch/csrc/autograd/function_hook.h>
#include <torch/csrc/autograd/variable.h>
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/constants.h>
#include <torch/csrc/jit/ir.h>
#include <torch/csrc/jit/stack.h>
namespace jit {
namespace tracer {
+using ::c10::ivalue::List;
+using ::c10::ivalue::Shared;
+
+using ::c10::IValue;
+using ::c10::ivalue::Future;
+using ::c10::ivalue::Tuple;
+
+using ::c10::ivalue::BoolList;
+using ::c10::ivalue::DoubleList;
+using ::c10::ivalue::GenericList;
+using ::c10::ivalue::IntList;
+using ::c10::ivalue::TensorList;
+
+using ::c10::ivalue::ConstantString;
+
using torch::autograd::Variable;
using variable_list = std::vector<Variable>;
TORCH_API void setValueTrace(const IValue& v, Value* value);
inline void delValueTrace(const Variable& var) {
- JIT_ASSERT(var.defined());
+ AT_ASSERT(var.defined());
getTracingState()->value_map.erase(var);
}
#include <torch/csrc/WindowsTorchApiMacro.h>
#include <torch/csrc/autograd/function_hook.h>
#include <torch/csrc/autograd/variable.h>
-#include <torch/csrc/jit/assertions.h>
+#include <c10/util/Exception.h>
#include <torch/csrc/jit/constants.h>
#include <torch/csrc/jit/stack.h>
-#include <torch/csrc/jit/type.h>
+#include <ATen/core/jit_type.h>
#include <torch/csrc/utils/functional.h>
#include <torch/csrc/utils/variadic.h>
const std::string& arg_name,
size_t idx,
const Variable& var,
- const TypePtr& type = nullptr);
+ const c10::TypePtr& type = nullptr);
static bool hasValue(const std::string& arg_name) {
return stash.values.count(arg_name) > 0;
+++ /dev/null
-#include <ATen/core/jit_type.h>
-
-namespace torch {
-namespace jit {
-
-#define C10_USING(T) using ::c10::T;
-C10_FORALL_TYPES(C10_USING)
-#undef C10_USING
-
-#define C10_USING(T) using ::c10::T##Ptr;
-C10_FORALL_TYPES(C10_USING)
-#undef C10_USING
-
-using ::c10::Type;
-using ::c10::TypeEnv;
-using ::c10::TypePtr;
-
-using ::c10::getTypePtr;
-using ::c10::MatchTypeReturn;
-using ::c10::TypeKind;
-
-} // namespace jit
-} // namespace torch
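The deleted type.h relied on an X-macro: C10_FORALL_TYPES(C10_USING) expands C10_USING(T) once per type in the list, so an entry such as TensorType (an assumed member of that list) yields `using ::c10::TensorType;` and `using ::c10::TensorTypePtr;`. Consumers now include <ATen/core/jit_type.h> and spell the c10 names directly.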