From: Mikhail Zolotukhin
Date: Thu, 24 Jan 2019 19:05:07 +0000 (-0800)
Subject: Directly include headers from ATen.
X-Git-Tag: accepted/tizen/6.5/unified/20211028.231830~1701
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=47bf30661fb45f106e2b609495d82af463513b99;p=platform%2Fupstream%2Fpytorch.git

Directly include headers from ATen.

Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/16287

Differential Revision: D13792949

Pulled By: ZolotukhinM

fbshipit-source-id: d627d8dc469df048063c70d0b5b8d33fede809a3
---

diff --git a/docs/cpp/source/Doxyfile b/docs/cpp/source/Doxyfile
index 15f69dc..8290c9c 100644
--- a/docs/cpp/source/Doxyfile
+++ b/docs/cpp/source/Doxyfile
@@ -31,6 +31,7 @@ STRIP_FROM_PATH = ../../..
 # What folders / files Doxygen should process.
 INPUT = ../../../aten/src/ATen/ATen.h \
         ../../../aten/src/ATen/Backend.h \
+        ../../../aten/src/ATen/core/ivalue.h \
         ../../../aten/src/ATen/core/ScalarType.h \
         ../../../aten/src/ATen/core/Tensor.h \
         ../../../aten/src/ATen/cuda/CUDAContext.h \
@@ -57,7 +58,6 @@ INPUT = ../../../aten/src/ATen/ATen.h \
         ../../../torch/csrc/autograd/generated/variable_factories.h \
         ../../../torch/csrc/jit/custom_operator.h \
         ../../../torch/csrc/jit/import.h \
-        ../../../torch/csrc/jit/ivalue.h \
         ../../../torch/csrc/jit/script/module.h
 # Don't include .cpp files!
 FILE_PATTERNS = *.h
diff --git a/test/cpp/jit/tests.h b/test/cpp/jit/tests.h
index 612597c..02c171f 100644
--- a/test/cpp/jit/tests.h
+++ b/test/cpp/jit/tests.h
@@ -30,14 +30,14 @@
 #include "torch/csrc/autograd/generated/variable_factories.h"
 #include "torch/csrc/autograd/variable.h"
 #include "torch/csrc/jit/argument_spec.h"
-#include "torch/csrc/jit/assertions.h"
+#include "c10/util/Exception.h"
 #include "torch/csrc/jit/attributes.h"
 #include "torch/csrc/jit/autodiff.h"
 #include "torch/csrc/jit/code_template.h"
 #include "torch/csrc/jit/custom_operator.h"
 #include "torch/csrc/jit/dynamic_dag.h"
 #include "torch/csrc/jit/fuser/interface.h"
-#include "torch/csrc/jit/interned_strings.h"
+#include "ATen/core/interned_strings.h"
 #include "torch/csrc/jit/interpreter.h"
 #include "torch/csrc/jit/ir.h"
 #include "torch/csrc/jit/operator.h"
@@ -61,7 +61,7 @@
 #include "torch/csrc/autograd/variable.h"
 #include "torch/csrc/jit/graph_executor.h"
-#include "torch/csrc/jit/ivalue.h"
+#include "ATen/core/ivalue.h"
 #include "torch/csrc/jit/script/compiler.h"
 #include "torch/csrc/jit/script/module.h"
@@ -955,7 +955,7 @@ void testRegisterFusionCachesKernel(std::ostream& out = std::cout) {
 std::find_if(nodes.begin(), nodes.end(), [](const Node* node) {
 return node->kind() == prim::FusionGroup;
 });
- JIT_ASSERTM(
+ AT_CHECK(
 maybe_fusion_group != nodes.end(),
 "testRegisterFusionCachesKernel: could not create FusionGroup");
 return *maybe_fusion_group;
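The JIT_ASSERTM -> AT_CHECK swap above is purely mechanical: torch/csrc/jit/assertions.h (deleted later in this patch) was a one-line shim defining JIT_ASSERT as AT_ASSERT and JIT_ASSERTM as AT_CHECK, so every rewrite in this diff preserves behavior. As a rough sketch of how the two c10 macros divide the work after the migration (the function below is illustrative, not part of the patch):

    #include <c10/util/Exception.h>

    int64_t checkedIndex(int64_t i, int64_t size) {
      // AT_CHECK: a checked condition with a user-facing message; on failure
      // it throws c10::Error carrying the concatenated message parts.
      AT_CHECK(0 <= i && i < size, "index ", i, " out of range for size ", size);
      // AT_ASSERT: an internal invariant; a failure here means a bug in the
      // caller, not bad user input.
      AT_ASSERT(size >= 0);
      return i;
    }

Both macros throw c10::Error, which is why testAddEdgeCycleDetection below can catch c10::Error directly.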
@@ -1596,19 +1596,19 @@ std::unique_ptr> newDynamicDAG() {
 void testNewVertex() {
 auto graph = newDynamicDAG();
- JIT_ASSERT(graph->debugNumVertices() == 0);
+ AT_ASSERT(graph->debugNumVertices() == 0);
 auto a = graph->newVertex("a");
- JIT_ASSERT(graph->debugNumVertices() == 1);
- JIT_ASSERT(a->ord == 0);
- JIT_ASSERT(a->data.size() == 1);
- JIT_ASSERT(a->data[0] == "a");
- JIT_ASSERT(a->in_edges().size() == 0);
- JIT_ASSERT(a->out_edges().size() == 0);
+ AT_ASSERT(graph->debugNumVertices() == 1);
+ AT_ASSERT(a->ord == 0);
+ AT_ASSERT(a->data.size() == 1);
+ AT_ASSERT(a->data[0] == "a");
+ AT_ASSERT(a->in_edges().size() == 0);
+ AT_ASSERT(a->out_edges().size() == 0);
 auto b = graph->newVertex("b");
 auto c = graph->newVertex("c");
- JIT_ASSERT(graph->debugNumVertices() == 3);
- JIT_ASSERT(b->ord == 1);
- JIT_ASSERT(c->ord == 2);
+ AT_ASSERT(graph->debugNumVertices() == 3);
+ AT_ASSERT(b->ord == 1);
+ AT_ASSERT(c->ord == 2);
 }
 void testAddEdgeBasic() {
@@ -1621,18 +1621,18 @@ void testAddEdgeBasic() {
 graph->addEdge(a, b);
 graph->addEdge(b, c);
 graph->addEdge(a, c);
- JIT_ASSERT(a->in_edges().size() == 0);
- JIT_ASSERT(a->out_edges().size() == 2);
- JIT_ASSERT(a->out_edges().contains(b));
- JIT_ASSERT(a->out_edges().contains(c));
- JIT_ASSERT(b->in_edges().size() == 1);
- JIT_ASSERT(b->out_edges().size() == 1);
- JIT_ASSERT(b->in_edges().contains(a));
- JIT_ASSERT(b->out_edges().contains(c));
- JIT_ASSERT(c->in_edges().size() == 2);
- JIT_ASSERT(c->out_edges().size() == 0);
- JIT_ASSERT(c->in_edges().contains(a));
- JIT_ASSERT(c->in_edges().contains(b));
+ AT_ASSERT(a->in_edges().size() == 0);
+ AT_ASSERT(a->out_edges().size() == 2);
+ AT_ASSERT(a->out_edges().contains(b));
+ AT_ASSERT(a->out_edges().contains(c));
+ AT_ASSERT(b->in_edges().size() == 1);
+ AT_ASSERT(b->out_edges().size() == 1);
+ AT_ASSERT(b->in_edges().contains(a));
+ AT_ASSERT(b->out_edges().contains(c));
+ AT_ASSERT(c->in_edges().size() == 2);
+ AT_ASSERT(c->out_edges().size() == 0);
+ AT_ASSERT(c->in_edges().contains(a));
+ AT_ASSERT(c->in_edges().contains(b));
 }
 void testAddEdgeCycleDetection() {
@@ -1650,7 +1650,7 @@ void testAddEdgeCycleDetection() {
 } catch (c10::Error& err) {
 erred = true;
 }
- JIT_ASSERT(erred);
+ AT_ASSERT(erred);
 }
 void testAddEdgeReordersBasic() {
@@ -1658,11 +1658,11 @@ void testAddEdgeReordersBasic() {
 auto graph = newDynamicDAG();
 auto a = graph->newVertex("a");
 auto b = graph->newVertex("b");
- JIT_ASSERT(a->ord == 0);
- JIT_ASSERT(b->ord == 1);
+ AT_ASSERT(a->ord == 0);
+ AT_ASSERT(b->ord == 1);
 graph->addEdge(b, a);
- JIT_ASSERT(a->ord == 1);
- JIT_ASSERT(b->ord == 0);
+ AT_ASSERT(a->ord == 1);
+ AT_ASSERT(b->ord == 0);
 }
 void testAddEdgeReordersComplicated() {
@@ -1675,29 +1675,29 @@ void testAddEdgeReordersComplicated() {
 auto d = graph->newVertex("d");
 graph->addEdge(a, b);
 graph->addEdge(c, d);
- JIT_ASSERT(a->ord == 0);
- JIT_ASSERT(b->ord == 1);
- JIT_ASSERT(c->ord == 2);
- JIT_ASSERT(d->ord == 3);
+ AT_ASSERT(a->ord == 0);
+ AT_ASSERT(b->ord == 1);
+ AT_ASSERT(c->ord == 2);
+ AT_ASSERT(d->ord == 3);
 graph->addEdge(d, a);
- JIT_ASSERT(c->ord == 0);
- JIT_ASSERT(d->ord == 1);
- JIT_ASSERT(a->ord == 2);
- JIT_ASSERT(b->ord == 3);
- JIT_ASSERT(c->in_edges().size() == 0);
- JIT_ASSERT(c->out_edges().size() == 1);
- JIT_ASSERT(c->out_edges().contains(d));
- JIT_ASSERT(d->in_edges().size() == 1);
- JIT_ASSERT(d->out_edges().size() == 1);
- JIT_ASSERT(d->in_edges().contains(c));
- JIT_ASSERT(d->out_edges().contains(a));
- JIT_ASSERT(a->in_edges().size() == 1);
- JIT_ASSERT(a->out_edges().size() == 1);
- JIT_ASSERT(a->in_edges().contains(d));
- JIT_ASSERT(a->out_edges().contains(b));
- JIT_ASSERT(b->in_edges().size() == 1);
- JIT_ASSERT(b->out_edges().size() == 0);
- JIT_ASSERT(b->in_edges().contains(a));
+ AT_ASSERT(c->ord == 0);
+ AT_ASSERT(d->ord == 1);
+ AT_ASSERT(a->ord == 2);
+ AT_ASSERT(b->ord == 3);
+ AT_ASSERT(c->in_edges().size() == 0);
+ AT_ASSERT(c->out_edges().size() == 1);
+ AT_ASSERT(c->out_edges().contains(d));
+ AT_ASSERT(d->in_edges().size() == 1);
+ AT_ASSERT(d->out_edges().size() == 1);
+ AT_ASSERT(d->in_edges().contains(c));
+ AT_ASSERT(d->out_edges().contains(a));
+ AT_ASSERT(a->in_edges().size() == 1);
+ AT_ASSERT(a->out_edges().size() == 1);
+ AT_ASSERT(a->in_edges().contains(d));
+ AT_ASSERT(a->out_edges().contains(b));
+ AT_ASSERT(b->in_edges().size() == 1);
+ AT_ASSERT(b->out_edges().size() == 0);
+ AT_ASSERT(b->in_edges().contains(a));
 }
 void testRemoveEdgeBasic() {
@@ -1706,11 +1706,11 @@ void testRemoveEdgeBasic() {
 auto a = graph->newVertex("a");
 auto b = graph->newVertex("b");
 graph->addEdge(a, b);
- JIT_ASSERT(graph->debugNumVertices() == 2);
+ AT_ASSERT(graph->debugNumVertices() == 2);
 graph->removeEdge(a, b);
- JIT_ASSERT(graph->debugNumVertices() == 2);
- JIT_ASSERT(a->out_edges().size() == 0);
- JIT_ASSERT(b->in_edges().size() == 0);
+ AT_ASSERT(graph->debugNumVertices() == 2);
+ AT_ASSERT(a->out_edges().size() == 0);
+ AT_ASSERT(b->in_edges().size() == 0);
 }
 void testRemoveVertexBasic() {
@@ -1721,11 +1721,11 @@ void testRemoveVertexBasic() {
 auto c = graph->newVertex("c");
 graph->addEdge(a, b);
 graph->addEdge(b, c);
- JIT_ASSERT(graph->debugNumVertices() == 3);
+ AT_ASSERT(graph->debugNumVertices() == 3);
 graph->removeVertex(b);
- JIT_ASSERT(graph->debugNumVertices() == 2);
- JIT_ASSERT(a->out_edges().size() == 0);
- JIT_ASSERT(c->in_edges().size() == 0);
+ AT_ASSERT(graph->debugNumVertices() == 2);
+ AT_ASSERT(a->out_edges().size() == 0);
+ AT_ASSERT(c->in_edges().size() == 0);
 }
 void testContractEdgeBasic() {
@@ -1739,18 +1739,18 @@ void testContractEdgeBasic() {
 graph->addEdge(b, c);
 graph->addEdge(c, d);
 graph->contractEdge(b, c);
- JIT_ASSERT(graph->debugNumVertices() == 3);
- JIT_ASSERT(a->out_edges().size() == 1);
- JIT_ASSERT(d->in_edges().size() == 1);
- JIT_ASSERT(*a->out_edges().begin() == *d->in_edges().begin());
+ AT_ASSERT(graph->debugNumVertices() == 3);
+ AT_ASSERT(a->out_edges().size() == 1);
+ AT_ASSERT(d->in_edges().size() == 1);
+ AT_ASSERT(*a->out_edges().begin() == *d->in_edges().begin());
 auto* contracted = *a->out_edges().begin();
- JIT_ASSERT(contracted->data.size() == 2);
- JIT_ASSERT(contracted->data[0] == "b");
- JIT_ASSERT(contracted->data[1] == "c");
- JIT_ASSERT(contracted->out_edges().size() == 1);
- JIT_ASSERT(contracted->in_edges().size() == 1);
- JIT_ASSERT(contracted->in_edges().contains(a));
- JIT_ASSERT(contracted->out_edges().contains(d));
+ AT_ASSERT(contracted->data.size() == 2);
+ AT_ASSERT(contracted->data[0] == "b");
+ AT_ASSERT(contracted->data[1] == "c");
+ AT_ASSERT(contracted->out_edges().size() == 1);
+ AT_ASSERT(contracted->in_edges().size() == 1);
+ AT_ASSERT(contracted->in_edges().contains(a));
+ AT_ASSERT(contracted->out_edges().contains(d));
 }
 void testContractEdgeCycleDetection() {
@@ -1764,7 +1764,7 @@ void testContractEdgeCycleDetection() {
 graph->addEdge(a, b);
 graph->addEdge(b, c);
 graph->addEdge(a, c);
- JIT_ASSERT(!graph->contractEdge(a, c));
+ AT_ASSERT(!graph->contractEdge(a, c));
 }
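Taken together, the tests above pin down the DynamicDAG contract: newVertex assigns increasing ord positions, addEdge locally reorders ord values to keep a valid topological order, and a cycle-producing operation either throws c10::Error (addEdge) or returns false (contractEdge). A compressed restatement of that contract, assuming DynamicDAG<std::string> can be constructed directly (the tests go through a newDynamicDAG() helper, and the detail namespace below is an assumption):

    #include "torch/csrc/jit/dynamic_dag.h"

    void dynamicDAGContractSketch() {
      torch::jit::detail::DynamicDAG<std::string> graph;  // namespace assumed
      auto* a = graph.newVertex("a");
      auto* b = graph.newVertex("b");
      AT_ASSERT(a->ord < b->ord);  // creation order is the initial topological order
      graph.addEdge(b, a);         // edge b -> a forces a reorder: b now precedes a
      AT_ASSERT(b->ord < a->ord);
      bool threw = false;
      try {
        graph.addEdge(a, b);       // would close the cycle a -> b -> a
      } catch (c10::Error& err) {
        threw = true;
      }
      AT_ASSERT(threw);
    }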
 void testDynamicDAG() {
@@ -1888,7 +1888,7 @@ struct TopoMoveTestFixture {
 curNode = original;
 size_t idx = 0;
 while (curNode != n->owningBlock()->return_node()) {
- JIT_ASSERT(originalOrdering[idx] == curNode);
+ AT_ASSERT(originalOrdering[idx] == curNode);
 if (isAfter) {
 curNode = curNode->next();
 } else {
@@ -1905,9 +1905,9 @@ struct TopoMoveTestFixture {
 const std::string& insertPoint,
 bool after) {
 if (after) {
- JIT_ASSERT(nodes.at(toInsert)->prev() == nodes.at(insertPoint));
+ AT_ASSERT(nodes.at(toInsert)->prev() == nodes.at(insertPoint));
 } else {
- JIT_ASSERT(nodes.at(toInsert)->next() == nodes.at(insertPoint));
+ AT_ASSERT(nodes.at(toInsert)->next() == nodes.at(insertPoint));
 }
 }
@@ -1921,49 +1921,49 @@ void testTopologicalMove() {
 // Check that we are removing `this`'s deps properly when we need to split
 // `this` and deps (see code for what the hell that means)
 TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveBeforeTopologicallyValid("q", "s"));
+ AT_ASSERT(fixture.moveBeforeTopologicallyValid("q", "s"));
 fixture.checkPostCondition("q", "s", false);
 }
 // Move after
 {
 // Simple move backward
 TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveAfterTopologicallyValid("c", "a"));
+ AT_ASSERT(fixture.moveAfterTopologicallyValid("c", "a"));
 fixture.checkPostCondition("c", "a", true);
 }
 {
 // simple invalid move backward
 TopoMoveTestFixture fixture;
- JIT_ASSERT(!fixture.moveAfterTopologicallyValid("d", "a"));
+ AT_ASSERT(!fixture.moveAfterTopologicallyValid("d", "a"));
 }
 {
 // doesn't actually move anything
 TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveAfterTopologicallyValid("f", "e"));
+ AT_ASSERT(fixture.moveAfterTopologicallyValid("f", "e"));
 fixture.checkPostCondition("f", "e", true);
 }
 {
 // move backward with multiple dependencies
 TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveAfterTopologicallyValid("e", "c"));
+ AT_ASSERT(fixture.moveAfterTopologicallyValid("e", "c"));
 fixture.checkPostCondition("e", "c", true);
 }
 {
 // Move backward with non-zero working set
 TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveAfterTopologicallyValid("k", "f"));
+ AT_ASSERT(fixture.moveAfterTopologicallyValid("k", "f"));
 fixture.checkPostCondition("k", "f", true);
 }
 {
 // Simple move forward
 TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveAfterTopologicallyValid("c", "d"));
+ AT_ASSERT(fixture.moveAfterTopologicallyValid("c", "d"));
 fixture.checkPostCondition("c", "d", true);
 }
 {
 // Move forward with non-zero working set
 TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveAfterTopologicallyValid("f", "l"));
+ AT_ASSERT(fixture.moveAfterTopologicallyValid("f", "l"));
 fixture.checkPostCondition("f", "l", true);
 }
@@ -1971,41 +1971,41 @@ void testTopologicalMove() {
 {
 // Simple move forward
 TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveBeforeTopologicallyValid("b", "d"));
+ AT_ASSERT(fixture.moveBeforeTopologicallyValid("b", "d"));
 fixture.checkPostCondition("b", "d", false);
 }
 {
 // Simple move backward
 TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveBeforeTopologicallyValid("c", "a"));
+ AT_ASSERT(fixture.moveBeforeTopologicallyValid("c", "a"));
 fixture.checkPostCondition("c", "a", false);
 }
 {
 // doesn't actually move anything
 TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveBeforeTopologicallyValid("a", "b"));
+ AT_ASSERT(fixture.moveBeforeTopologicallyValid("a", "b"));
 fixture.checkPostCondition("a", "b", false);
 }
 {
 // move forward with deps
 TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveBeforeTopologicallyValid("f", "m"));
+ AT_ASSERT(fixture.moveBeforeTopologicallyValid("f", "m"));
 fixture.checkPostCondition("f", "m", false);
 }
 {
 // move backward with deps
 TopoMoveTestFixture fixture;
- JIT_ASSERT(fixture.moveBeforeTopologicallyValid("l", "f"));
+ AT_ASSERT(fixture.moveBeforeTopologicallyValid("l", "f"));
 fixture.checkPostCondition("l", "f", false);
 }
 // check that dependencies in blocks are recognized
 {
 TopoMoveTestFixture fixture;
- JIT_ASSERT(!fixture.moveAfterTopologicallyValid("l", "m"));
- JIT_ASSERT(!fixture.moveBeforeTopologicallyValid("m", "l"));
- JIT_ASSERT(!fixture.moveAfterTopologicallyValid("n", "l"));
- JIT_ASSERT(!fixture.moveBeforeTopologicallyValid("l", "n"));
+ AT_ASSERT(!fixture.moveAfterTopologicallyValid("l", "m"));
+ AT_ASSERT(!fixture.moveBeforeTopologicallyValid("m", "l"));
AT_ASSERT(!fixture.moveAfterTopologicallyValid("n", "l")); + AT_ASSERT(!fixture.moveBeforeTopologicallyValid("l", "n")); } // Test that moveAfter(n) and moveBefore(n->next()) are not necessarily @@ -2014,8 +2014,8 @@ void testTopologicalMove() { // `p`) { TopoMoveTestFixture fixture; - JIT_ASSERT(!fixture.moveAfterTopologicallyValid("n", "o")); - JIT_ASSERT(fixture.moveBeforeTopologicallyValid("o", "p")); + AT_ASSERT(!fixture.moveAfterTopologicallyValid("n", "o")); + AT_ASSERT(fixture.moveBeforeTopologicallyValid("o", "p")); fixture.checkPostCondition("o", "p", false); } } @@ -2039,13 +2039,13 @@ void testAliasAnalysis() { auto aliasDb = AliasAnalysis(graph); // Can't move past a mutation of a used value - JIT_ASSERT(!aliasDb.moveAfterTopologicallyValid(c->node(), aMut->node())); - JIT_ASSERT(aliasDb.moveAfterTopologicallyValid(d->node(), c->node())); + AT_ASSERT(!aliasDb.moveAfterTopologicallyValid(c->node(), aMut->node())); + AT_ASSERT(aliasDb.moveAfterTopologicallyValid(d->node(), c->node())); // b should alias to a (since they are both inputs) - JIT_ASSERT( + AT_ASSERT( !aliasDb.moveAfterTopologicallyValid(addsB->node(), aMut->node())); - JIT_ASSERT(aliasDb.moveAfterTopologicallyValid(addsB->node(), c->node())); + AT_ASSERT(aliasDb.moveAfterTopologicallyValid(addsB->node(), c->node())); graph->lint(); } @@ -2063,9 +2063,9 @@ void testAliasAnalysis() { graph->lint(); auto aliasDb = AliasAnalysis(graph); - JIT_ASSERT(!aliasDb.moveAfterTopologicallyValid( + AT_ASSERT(!aliasDb.moveAfterTopologicallyValid( aliasesB->node(), mutatesAliasOfB->node())); - JIT_ASSERT(!aliasDb.moveAfterTopologicallyValid( + AT_ASSERT(!aliasDb.moveAfterTopologicallyValid( usesB->node(), mutatesAliasOfB->node())); } } diff --git a/tools/jit/templates/register_aten_ops.cpp b/tools/jit/templates/register_aten_ops.cpp index 62762d4..a8bf79c 100644 --- a/tools/jit/templates/register_aten_ops.cpp +++ b/tools/jit/templates/register_aten_ops.cpp @@ -2,12 +2,12 @@ #include "torch/csrc/jit/custom_operator.h" #include "torch/csrc/autograd/profiler.h" -#include "torch/csrc/jit/interned_strings.h" #include "torch/csrc/utils/functional.h" #include "torch/csrc/autograd/generated/variable_factories.h" #include +#include #include #include @@ -54,7 +54,7 @@ inline at::optional deviceForInputs(Stack & stack, size_t N) { template std::array as_bool_array(const std::vector& vec) { std::array res; - JIT_ASSERT(vec.size() == N); + AT_ASSERT(vec.size() == N); std::copy(vec.begin(), vec.end(), res.begin()); return res; } diff --git a/torch/csrc/jit/argument_spec.h b/torch/csrc/jit/argument_spec.h index ba9c0cd..7645b1e 100644 --- a/torch/csrc/jit/argument_spec.h +++ b/torch/csrc/jit/argument_spec.h @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include #include @@ -77,7 +77,7 @@ struct ArgumentSpec { for (const auto& i : inputs) { addInput(i, offset, with_grad); } - JIT_ASSERT(offset == num_flat_inputs); + AT_ASSERT(offset == num_flat_inputs); } void addInput(const IValue& input, size_t& offset, bool with_grad) { diff --git a/torch/csrc/jit/assertions.h b/torch/csrc/jit/assertions.h deleted file mode 100644 index 51dc4b5..0000000 --- a/torch/csrc/jit/assertions.h +++ /dev/null @@ -1,6 +0,0 @@ -#pragma once - -#include - -#define JIT_ASSERT AT_ASSERT -#define JIT_ASSERTM AT_CHECK diff --git a/torch/csrc/jit/attributes.h b/torch/csrc/jit/attributes.h index 500fa64..7ded022 100644 --- a/torch/csrc/jit/attributes.h +++ b/torch/csrc/jit/attributes.h @@ -6,18 +6,21 @@ #include #include -#include +#include +#include 
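The deleted assertions.h is the template for this whole patch: forwarding headers under torch/csrc/jit that merely re-exported ATen/c10 functionality go away, and consumers include the real header directly, pulling any needed names into torch::jit with using-declarations (as attributes.h does next with using ::c10::Symbol;). Schematically, with the shim's elided include spelled out on the assumption that it was c10/util/Exception.h, matching the replacement includes elsewhere in this diff:

    // Before: torch/csrc/jit/assertions.h, a pure forwarding shim (now deleted).
    #pragma once
    #include <c10/util/Exception.h>  // assumed; the include target is elided above
    #define JIT_ASSERT AT_ASSERT
    #define JIT_ASSERTM AT_CHECK

    // After: consumers include c10 directly and spell the macros AT_*;
    // types are re-exported so old qualified names keep compiling.
    #include <c10/util/Exception.h>
    namespace torch {
    namespace jit {
    using ::c10::Symbol;  // keeps `Symbol` meaning c10::Symbol inside torch::jit
    } // namespace jit
    } // namespace torch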
diff --git a/torch/csrc/jit/attributes.h b/torch/csrc/jit/attributes.h
index 500fa64..7ded022 100644
--- a/torch/csrc/jit/attributes.h
+++ b/torch/csrc/jit/attributes.h
@@ -6,18 +6,21 @@
 #include
 #include
-#include
+#include
+#include
 namespace torch {
 namespace jit {
+using ::c10::Symbol;
+
 constexpr int max_tensor_display_size = 10;
 enum class AttributeKind { f, fs, i, is, s, ss, t, ts, g, gs };
 static inline const char* toString(AttributeKind kind) {
 static const char* names[] = {
 "f", "fs", "i", "is", "s", "ss", "t", "ts", "g", "gs"};
- JIT_ASSERT(size_t(kind) < sizeof(names) / sizeof(AttributeKind));
+ AT_ASSERT(size_t(kind) < sizeof(names) / sizeof(AttributeKind));
 return names[int(kind)];
 }
diff --git a/torch/csrc/jit/autodiff.cpp b/torch/csrc/jit/autodiff.cpp
index 5bec04b..fdfa031 100644
--- a/torch/csrc/jit/autodiff.cpp
+++ b/torch/csrc/jit/autodiff.cpp
@@ -4,13 +4,13 @@
 #include
 #include
 #include
+#include
+#include
+#include
 #include
 #include
-#include "torch/csrc/jit/passes/lower_tuples.h"
-#include "torch/csrc/jit/script/compiler.h"
-#include "torch/csrc/jit/symbolic_script.h"
-#include
+#include
 #include
 #include
@@ -613,7 +613,7 @@ class GradientHelper {
 } else if (
 node->matches(
 "aten::avg_pool2d(Tensor self, int[] kernel_size, int[] stride, int[] padding, bool ceil_mode, bool count_include_pad) -> Tensor")) {
- JIT_ASSERT(grads.size() == 1);
+ AT_ASSERT(grads.size() == 1);
 auto graph = node->owningGraph();
 auto backward_value = graph->insert(
 aten::avg_pool2d_backward,
@@ -634,7 +634,7 @@ class GradientHelper {
 } else if (
 node->matches(
 "aten::max_pool2d_with_indices(Tensor self, int[] kernel_size, int[] stride, int[] padding, int[] dilation, bool ceil_mode) -> (Tensor, Tensor)")) {
- JIT_ASSERT(grads.size() == 2);
+ AT_ASSERT(grads.size() == 2);
 auto graph = node->owningGraph();
 auto backward_value = graph->insert(
 aten::max_pool2d_with_indices_backward,
@@ -673,7 +673,7 @@ class GradientHelper {
 Node* tuple_unpack_node =
 graph->insertNode(graph->createTupleUnpack(backward_value));
 auto tuple_outputs = tuple_unpack_node->outputs();
- JIT_ASSERT(tuple_outputs.size() == size_t(3));
+ AT_ASSERT(tuple_outputs.size() == size_t(3));
 return {tuple_outputs[0],
 tuple_outputs[1],
 nullptr,
@@ -702,7 +702,7 @@ class GradientHelper {
 Node* tuple_unpack_node =
 graph->insertNode(graph->createTupleUnpack(backward_value));
 auto tuple_outputs = tuple_unpack_node->outputs();
- JIT_ASSERT(tuple_outputs.size() == size_t(3));
+ AT_ASSERT(tuple_outputs.size() == size_t(3));
 return {tuple_outputs[0],
 tuple_outputs[1],
 tuple_outputs[2],
@@ -735,7 +735,7 @@ class GradientHelper {
 } else if (node->matches(
 "aten::log_softmax(Tensor self, int dim) -> Tensor")) {
- JIT_ASSERT(grads.size() == 1);
+ AT_ASSERT(grads.size() == 1);
 auto graph = node->owningGraph();
 auto backward_value = graph->insert(
 aten::_log_softmax_backward_data,
@@ -866,7 +866,7 @@ static ReverseDetails addReverseInline(Gradient& grad_desc) {
 linearGradientForNode(node, fmap(node->outputs(), get_grad));
 LowerSimpleTuples(reverse_block);
- JIT_ASSERT(grad_inputs.size() == node->inputs().size());
+ AT_ASSERT(grad_inputs.size() == node->inputs().size());
 for (size_t i = 0, num_inputs = grad_inputs.size(); i < num_inputs; ++i) {
 if (!inputs[i]->requires_grad())
 continue;
@@ -938,7 +938,7 @@ static void liftConstants(Gradient& grad_desc, ReverseDetails& rev_info) {
 Block* reverse_block = rev_info.reverse_block;
 for (Node* top_node : reverse_block->nodes()) {
- JIT_ASSERT(
+ AT_ASSERT(
 top_node->kind() == prim::GradOf ||
 top_node->kind() == prim::AutogradAdd ||
 top_node->kind() == prim::Undefined);
@@ -1151,7 +1151,7 @@ static void lambdaLiftReverse(Gradient& grad_desc, ReverseDetails& rev_info) {
 Gradient differentiate(std::shared_ptr& graph) {
 Gradient grad_desc;
 // Take ownership of the graph
- JIT_ASSERTM(
+ AT_CHECK(
 graph.use_count() == 1,
 "differentiate will mutate and destroy the graph, so it requires "
 "graph.use_count() == 1, but found %d",
diff --git a/torch/csrc/jit/constants.h b/torch/csrc/jit/constants.h
index 3a787f0..badfefd 100644
--- a/torch/csrc/jit/constants.h
+++ b/torch/csrc/jit/constants.h
@@ -1,6 +1,6 @@
 #pragma once
 #include
-#include
+#include
 #include
 #include
@@ -10,6 +10,8 @@
 namespace torch {
 namespace jit {
+using ::c10::IValue;
+
 struct Graph;
 struct Value;
diff --git a/torch/csrc/jit/custom_operator.h b/torch/csrc/jit/custom_operator.h
index 33e3865..4d82bed 100644
--- a/torch/csrc/jit/custom_operator.h
+++ b/torch/csrc/jit/custom_operator.h
@@ -1,17 +1,21 @@
 #pragma once
-#include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 namespace torch {
 namespace jit {
 namespace detail {
+
+using ::c10::Argument;
+using ::c10::FunctionSchema;
+
 /// Checks the static C++ type `T` for correctness to catch common error cases.
 template
 void checkStaticTypes() {
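constants.h and custom_operator.h apply the same move to types rather than macros: IValue, Argument, and FunctionSchema now come from ATen/core, with a using-declaration preserving the torch::jit spelling. A small illustration of why the alias keeps downstream code source-compatible (the caller below is hypothetical):

    #include <ATen/core/ivalue.h>
    #include <type_traits>

    namespace torch {
    namespace jit {
    using ::c10::IValue;  // the re-export constants.h adds
    } // namespace jit
    } // namespace torch

    // Code written against the old torch/csrc/jit headers compiles unchanged,
    // because the using-declaration names the very same type:
    static_assert(std::is_same<torch::jit::IValue, c10::IValue>::value,
                  "alias, not a new type");
    torch::jit::IValue answer = 42;  // constructs a c10::IValue holding an int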
diff --git a/torch/csrc/jit/dynamic_dag.h b/torch/csrc/jit/dynamic_dag.h
index 79dce52..b5b91d6 100644
--- a/torch/csrc/jit/dynamic_dag.h
+++ b/torch/csrc/jit/dynamic_dag.h
@@ -115,7 +115,7 @@ struct visited_list {
 }
 void push_back(Vertex* elt) {
- JIT_ASSERT(!elt->visited_);
+ AT_ASSERT(!elt->visited_);
 elt->visited_ = true;
 data_.push_back(elt);
 }
@@ -224,9 +224,9 @@ Vertex* DynamicDAG::newVertex(T datum) {
 template
 void DynamicDAG::removeEdge(Vertex* producer, Vertex* consumer) {
- JIT_ASSERT(producer != consumer);
- JIT_ASSERT(producer->out_edges().contains(consumer));
- JIT_ASSERT(consumer->in_edges().contains(producer));
+ AT_ASSERT(producer != consumer);
+ AT_ASSERT(producer->out_edges().contains(consumer));
+ AT_ASSERT(consumer->in_edges().contains(producer));
 producer->out_edges().erase(consumer);
 consumer->in_edges().erase(producer);
 }
@@ -347,7 +347,7 @@ IOEdges DynamicDAG::removeVertex(Vertex* v) {
 */
 template
 void DynamicDAG::addEdge(Vertex* producer, Vertex* consumer) {
- JIT_ASSERT(producer != consumer);
+ AT_ASSERT(producer != consumer);
 // NB: DynamicDAG is a simple graph. If an edge exists already, don't do
 // anything.
@@ -355,7 +355,7 @@ void DynamicDAG::addEdge(Vertex* producer, Vertex* consumer) {
 if (!is_distinct)
 return;
 is_distinct = consumer->in_edges().insert(producer);
- JIT_ASSERT(is_distinct);
+ AT_ASSERT(is_distinct);
 if (producer->ord <= consumer->ord) {
 // topological ordering is already consistent, no need to update.
@@ -379,7 +379,7 @@ void DynamicDAG::addEdge(Vertex* producer, Vertex* consumer) {
 // Search for vertices that can reach producer that have a now incorrect
 // topological ordering
- JIT_ASSERT(!dfsSearch(
+ AT_ASSERT(!dfsSearch(
 DFSDirection::backward,
 producer,
 consumer,
@@ -400,7 +400,7 @@ void DynamicDAG::addEdge(Vertex* producer, Vertex* consumer) {
 // |in_edges(consumer)|))
 template
 bool DynamicDAG::contractEdge(Vertex* producer, Vertex* consumer) {
- JIT_ASSERT(producer != consumer);
+ AT_ASSERT(producer != consumer);
 if (contractionProducesCycle(producer, consumer)) {
 return false;
 }
diff --git a/torch/csrc/jit/export.cpp b/torch/csrc/jit/export.cpp
index 44be035..a3b63e7 100644
--- a/torch/csrc/jit/export.cpp
+++ b/torch/csrc/jit/export.cpp
@@ -5,7 +5,7 @@
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -69,7 +69,7 @@ void validateBlock(
 onnx_torch::OperatorExportTypes::ONNX_ATEN_FALLBACK) {
 WithInsertPoint guard(node);
 auto* new_node =
 b->owningGraph()->insertNode(b->owningGraph()->create(
- Symbol(::torch::jit::onnx::ATen),
+ Symbol(::c10::onnx::ATen),
 node->inputs(),
 node->outputs().size()));
 for (size_t i = 0; i < node->outputs().size(); ++i) {
@@ -216,7 +216,7 @@ void EncoderBase::EncodeBlock(
 onnx::GraphProto* graph_proto,
 const Block* block,
 const std::vector& initializers) {
- JIT_ASSERT(graph_proto != nullptr);
+ AT_ASSERT(graph_proto != nullptr);
 std::string block_name = "torch-jit-export";
 if (num_blocks_) {
 block_name += std::to_string(num_blocks_);
 }
@@ -259,10 +259,10 @@ void EncoderBase::EncodeBlock(
 EncodeIntermediateValueInfo(graph_proto, output);
 }
 if (is_raw_export) {
- JIT_ASSERT(!node->kind().is_onnx());
+ AT_ASSERT(!node->kind().is_onnx());
 p_n->set_domain(node->kind().domainString());
 } else if (operator_export_type_ == onnx_torch::OperatorExportTypes::ONNX) {
- JIT_ASSERT(node->kind().is_onnx());
+ AT_ASSERT(node->kind().is_onnx());
 }
 p_n->set_op_type(node->kind().toUnqualString());
 for (auto attr_name : node->attributeNames()) {
@@ -277,8 +277,8 @@ void EncoderBase::EncodeBlock(
 EncodeBlock(graph, block, initializers);
 }
 }
- if (node->kind() == torch::jit::onnx::Loop) {
- JIT_ASSERT(node->blocks().size() == 1);
+ if (node->kind() == ::c10::onnx::Loop) {
+ AT_ASSERT(node->blocks().size() == 1);
 auto body = p_n->add_attribute();
 body->set_name("body");
@@ -286,8 +286,8 @@ void EncoderBase::EncodeBlock(
 auto g = body->mutable_g();
 EncodeBlock(g, node->blocks()[0]);
 }
- if (node->kind() == torch::jit::onnx::If) {
- JIT_ASSERT(node->blocks().size() == 2);
+ if (node->kind() == ::c10::onnx::If) {
+ AT_ASSERT(node->blocks().size() == 2);
 auto true_branch = p_n->add_attribute();
 true_branch->set_name("then_branch");
@@ -303,7 +303,7 @@ void EncoderBase::EncodeBlock(
 }
 }
 auto num_initializers = initializers.size();
- JIT_ASSERT(block->inputs().size() >= num_initializers);
+ AT_ASSERT(block->inputs().size() >= num_initializers);
 size_t inputs_count = block->inputs().size() - num_initializers;
 for (auto& tensor : initializers) {
 // TODO: stop using positions to determine which initializers
@@ -320,7 +320,7 @@ void EncoderBase::AddAttribute(
 const jit::Node* node,
 const jit::Symbol name) {
 auto attr = node_proto->add_attribute();
- JIT_ASSERT(name.is_attr());
+ AT_ASSERT(name.is_attr());
 attr->set_name(name.toUnqualString());
 switch (node->kindOf(name)) {
 case AttributeKind::f:
@@ -439,12 +439,12 @@ void GraphEncoder::EncodeTensor(
 if (defer_weight_export_ && external_ref) {
 // For now, we use the name of the tensor as the external lookup name to
 // avoid ONNX protobuf changes.
- JIT_ASSERT(external_ref.value() == tensor_proto->name());
- JIT_ASSERT(raw_data_export_map_.count(external_ref.value()) == 0);
+ AT_ASSERT(external_ref.value() == tensor_proto->name());
+ AT_ASSERT(raw_data_export_map_.count(external_ref.value()) == 0);
 raw_data_export_map_[external_ref.value()] = t;
 tensor_proto->set_raw_data("__EXTERNAL");
 } else {
- JIT_ASSERT(t.is_contiguous());
+ AT_ASSERT(t.is_contiguous());
 tensor_proto->set_raw_data(std::string(
 static_cast(t.data_ptr()),
 t.type().elementSizeInBytes() * t.numel()));
diff --git a/torch/csrc/jit/function_schema.h b/torch/csrc/jit/function_schema.h
deleted file mode 100644
index 350783c..0000000
--- a/torch/csrc/jit/function_schema.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#include
-
-namespace torch {
-namespace jit {
-
-using ::c10::Argument;
-using ::c10::FunctionSchema;
-
-} // namespace jit
-} // namespace torch
diff --git a/torch/csrc/jit/fuser/codegen.cpp b/torch/csrc/jit/fuser/codegen.cpp
index b62589c..c10cc23 100644
--- a/torch/csrc/jit/fuser/codegen.cpp
+++ b/torch/csrc/jit/fuser/codegen.cpp
@@ -1,7 +1,7 @@
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -217,7 +217,7 @@ static std::string encodeRHS(const Node* n) {
 } else if (val.isBool()) {
 return scalarValue(val.toBool());
 } else {
- JIT_ASSERT(val.isInt());
+ AT_ASSERT(val.isInt());
 return scalarValue(val.toInt());
 }
 }
@@ -317,7 +317,7 @@ std::string generateKernel(
 // Acquires input values
 bool has_half_tensor = false;
 size_t formal_count = 0;
- for (const auto input : inputs) {
+ for (const auto& input : inputs) {
 auto p = input.first;
 env.s("node", valueName(p));
 env.d("formal", formal_count++);
@@ -328,7 +328,7 @@ std::string generateKernel(
 // Access for other types is common to CUDA and CPU kernels.
 const auto is_half = (input.second.scalar_type == at::ScalarType::Half);
 if (is_half) {
- JIT_ASSERT(use_cuda);
+ AT_ASSERT(use_cuda);
 env.s(
 "access",
 format("__half2float(t${formal}.data[t${formal}_offset])", env));
@@ -353,7 +353,7 @@ std::string generateKernel(
 if (n->kind() == prim::ConstantChunk)
 continue;
 if (n->kind() == aten::rand_like) {
- JIT_ASSERT(use_cuda);
+ AT_ASSERT(use_cuda);
 has_random = true;
 }
 env.s("node", valueName(n->output()));
@@ -372,7 +372,7 @@ std::string generateKernel(
 // Note: conversion to half is only supported for CUDA kernels.
 const auto is_half = (output.second.scalar_type == at::ScalarType::Half);
 if (is_half) {
- JIT_ASSERT(use_cuda);
+ AT_ASSERT(use_cuda);
 body << format("${access} = __float2half(${node});\n", env);
 has_half_tensor = true;
 } else {
diff --git a/torch/csrc/jit/fuser/compiler.cpp b/torch/csrc/jit/fuser/compiler.cpp
index 12e0bce..7db21c8 100644
--- a/torch/csrc/jit/fuser/compiler.cpp
+++ b/torch/csrc/jit/fuser/compiler.cpp
@@ -1,7 +1,8 @@
 #include
 #include
-#include
+#include
+#include
 #include
 #include
 #include
@@ -10,8 +11,6 @@
 #include
 #include
 #include
-#include
-#include "torch/csrc/jit/fuser/interface.h"
 #include
 #include
@@ -174,7 +173,7 @@ int64_t registerFusion(const Node* fusion_group) {
 // be a valid spec (must have had upfrontCompilation run on it).
 const auto key = store(graph);
 const auto maybe_retrieved_spec = retrieve(key);
- JIT_ASSERT(maybe_retrieved_spec);
+ AT_ASSERT(maybe_retrieved_spec);
 upfrontCompilation(**maybe_retrieved_spec);
 return key;
diff --git a/torch/csrc/jit/fuser/cpu/dynamic_library.h b/torch/csrc/jit/fuser/cpu/dynamic_library.h
index 0d380ee..a3b85fa 100644
--- a/torch/csrc/jit/fuser/cpu/dynamic_library.h
+++ b/torch/csrc/jit/fuser/cpu/dynamic_library.h
@@ -1,6 +1,6 @@
 #pragma once
-#include
+#include
 #include
 namespace torch {
diff --git a/torch/csrc/jit/fuser/cpu/dynamic_library_unix.cpp b/torch/csrc/jit/fuser/cpu/dynamic_library_unix.cpp
index 1289a70..ce6918f 100644
--- a/torch/csrc/jit/fuser/cpu/dynamic_library_unix.cpp
+++ b/torch/csrc/jit/fuser/cpu/dynamic_library_unix.cpp
@@ -1,5 +1,4 @@
-
-#include
+#include
 #include
 #include
@@ -23,7 +22,7 @@ DynamicLibrary::DynamicLibrary(const char* name) {
 }
 void* DynamicLibrary::sym(const char* name) {
- JIT_ASSERT(handle);
+ AT_ASSERT(handle);
 return checkDL(dlsym(handle, name));
 }
diff --git a/torch/csrc/jit/fuser/cpu/dynamic_library_win.cpp b/torch/csrc/jit/fuser/cpu/dynamic_library_win.cpp
index 4393a61..5d19686 100644
--- a/torch/csrc/jit/fuser/cpu/dynamic_library_win.cpp
+++ b/torch/csrc/jit/fuser/cpu/dynamic_library_win.cpp
@@ -1,4 +1,4 @@
-#include
+#include
 #include
 #include
diff --git a/torch/csrc/jit/fuser/cpu/fused_kernel.cpp b/torch/csrc/jit/fuser/cpu/fused_kernel.cpp
index 6cb070b..c044aca 100644
--- a/torch/csrc/jit/fuser/cpu/fused_kernel.cpp
+++ b/torch/csrc/jit/fuser/cpu/fused_kernel.cpp
@@ -1,5 +1,5 @@
 #include
-#include
+#include
 #include
 #include
 #include
@@ -88,7 +88,7 @@ static void runCompiler(
 config.openmp = false; // disable for future compiles
 return runCompiler(cpp_file, so_file);
 }
- JIT_ASSERTM(r == 0, "Failed to compile a fused CPU kernel");
+ AT_CHECK(r == 0, "Failed to compile a fused CPU kernel");
 }
 static const std::string disas_string = "objdump -M intel -d \"${so_file}\"";
@@ -97,7 +97,7 @@ static void disas(const std::string& so_file) {
 env.s("so_file", so_file);
 std::string cmd = format(disas_string, env);
 int r = system(cmd.c_str());
- JIT_ASSERT(r == 0);
+ AT_ASSERT(r == 0);
 }
 FusedKernelCPU::FusedKernelCPU(
diff --git a/torch/csrc/jit/fuser/cpu/temp_file.h b/torch/csrc/jit/fuser/cpu/temp_file.h
index fd782a1..009f66e 100644
--- a/torch/csrc/jit/fuser/cpu/temp_file.h
+++ b/torch/csrc/jit/fuser/cpu/temp_file.h
@@ -2,7 +2,7 @@
 #include
 #include
-#include
+#include
 #include
 #include
@@ -23,7 +23,7 @@ struct TempFile {
 // so we make a copy of the string here, including null terminator
 std::vector tt(t.c_str(), t.c_str() + t.size() + 1);
 int fd = mkstemps(tt.data(), suffix);
- JIT_ASSERT(fd != -1);
+ AT_ASSERT(fd != -1);
 file_ = fdopen(fd, "r+");
 // - 1 because tt.size() includes the null terminator,
@@ -41,7 +41,7 @@ struct TempFile {
 void write(const std::string& str) {
 size_t result = fwrite(str.c_str(), 1, str.size(), file_);
- JIT_ASSERT(str.size() == result);
+ AT_ASSERT(str.size() == result);
 }
 FILE* file() {
diff --git a/torch/csrc/jit/fuser/cuda/fused_kernel.cpp b/torch/csrc/jit/fuser/cuda/fused_kernel.cpp
index bd287d7..b621aa2 100644
--- a/torch/csrc/jit/fuser/cuda/fused_kernel.cpp
+++ b/torch/csrc/jit/fuser/cuda/fused_kernel.cpp
@@ -48,7 +48,7 @@ static void getMajorMinor(
 TORCH_NVRTC_CHECK(nvrtcVersion(&nvrtc_major, &nvrtc_minor));
 // Short-circuits if NVRTC version too low
- JIT_ASSERT(nvrtc_major >= 6);
+ AT_ASSERT(nvrtc_major >= 6);
 // Major and minor are determined by device properties and
"downcompiled" to a lower (compatible) compute architecture diff --git a/torch/csrc/jit/fuser/executor.cpp b/torch/csrc/jit/fuser/executor.cpp index 446bd6f..dc66d3d 100644 --- a/torch/csrc/jit/fuser/executor.cpp +++ b/torch/csrc/jit/fuser/executor.cpp @@ -133,7 +133,7 @@ static std::vector computeMapSize( const at::Tensor& tensor, const PartitionDesc& chunkDesc) { std::vector sizes(tensor.sizes().begin(), tensor.sizes().end()); - JIT_ASSERT(sizes[chunkDesc.dim()] % chunkDesc.nSubTensors() == 0); + AT_ASSERT(sizes[chunkDesc.dim()] % chunkDesc.nSubTensors() == 0); sizes[chunkDesc.dim()] /= chunkDesc.nSubTensors(); return sizes; } @@ -153,7 +153,7 @@ static void compressContiguous( size_t total_size = sizes[cur]; cur++; while (cont[cur - 1] && cur < ndim) { - JIT_ASSERT(strides[cur - 1] == sizes[cur] * strides[cur]); + AT_ASSERT(strides[cur - 1] == sizes[cur] * strides[cur]); total_size *= sizes[cur]; cur++; } @@ -163,7 +163,7 @@ static void compressContiguous( } if (ndim > 0) - JIT_ASSERT(!cont.back() || strides.back() == 1); + AT_ASSERT(!cont.back() || strides.back() == 1); } // Launches the requested fusion on the given device with the given inputs. @@ -174,7 +174,7 @@ void launchFusion( const at::ArrayRef& inputs, std::vector& outputs) { // Fails if fusion and given inputs disagree - JIT_ASSERT(inputs.size() == fusion.inputDesc().size()); + AT_ASSERT(inputs.size() == fusion.inputDesc().size()); // Computes number of flattened inputs and outputs size_t flat_inputs_size = 0; @@ -188,7 +188,7 @@ void launchFusion( // a 32-bit integer. // Note: this code assumes that inputs are 32-bit addressable // Note: this code assumes that all inputs are of the same size - JIT_ASSERT(inputs[0].numel() <= std::numeric_limits::max()); + AT_ASSERT(inputs[0].numel() <= std::numeric_limits::max()); // Computes map_size, numel from the first input at::IntList map_size; @@ -223,7 +223,7 @@ void launchFusion( at::IntList sizes, at::IntList strides) { const auto nDim = desc.nDim(); // NOTE: this is the compressed dim - JIT_ASSERT(nDim <= uncompressedDim); // We'd overflow the space otherwise + AT_ASSERT(nDim <= uncompressedDim); // We'd overflow the space otherwise auto ti = reinterpret_cast(buffer_next); ti->data = data_ptr; compressContiguous( @@ -293,7 +293,7 @@ bool runFusion(const int64_t key, Stack& stack) { // Acquires the FusionSpec auto maybe_spec = retrieve(key); - JIT_ASSERT(maybe_spec); + AT_ASSERT(maybe_spec); auto& spec = *(*maybe_spec); // Acquires inputs from stack @@ -333,7 +333,7 @@ bool runFusion(const int64_t key, Stack& stack) { spec.cacheKernel(arg_spec, kernel); } maybe_kernel = spec.findKernel(arg_spec); - JIT_ASSERT(maybe_kernel); + AT_ASSERT(maybe_kernel); // Launches fusion std::vector outputs; diff --git a/torch/csrc/jit/fuser/partition_desc.h b/torch/csrc/jit/fuser/partition_desc.h index 5c57d3e..af8a210 100644 --- a/torch/csrc/jit/fuser/partition_desc.h +++ b/torch/csrc/jit/fuser/partition_desc.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include #include @@ -21,7 +21,7 @@ struct TORCH_API PartitionDesc { PartitionDesc(const TensorDesc& _desc, size_t _nSubTensors, size_t _dim) : nSubTensors_{_nSubTensors}, dim_{_dim} { - JIT_ASSERT(nSubTensors_ > 1); + AT_ASSERT(nSubTensors_ > 1); std::vector cont = _desc.contiguity; if (dim_ > 0) { // when we narrow the concatenated output/chunked input diff --git a/torch/csrc/jit/fuser/tensor_desc.h b/torch/csrc/jit/fuser/tensor_desc.h index 908c189..331c6d0 100644 --- a/torch/csrc/jit/fuser/tensor_desc.h +++ 
@@ -2,8 +2,8 @@
 #include
 #include
-#include
-#include
+#include
+#include
 #include
 #include
@@ -41,7 +41,7 @@ struct TORCH_API TensorDesc {
 TensorDesc(const at::Tensor& t)
 : TensorDesc(t.type().scalarType(), t.sizes(), t.strides()) {}
- TensorDesc(const CompleteTensorTypePtr& type)
+ TensorDesc(const c10::CompleteTensorTypePtr& type)
 : TensorDesc(type->scalarType(), type->sizes(), type->strides()) {}
 // number of dimensions after contiguity compression
@@ -57,7 +57,7 @@ struct TORCH_API TensorDesc {
 static std::vector findContiguous(
 const at::IntList& sizes,
 const at::IntList& strides) {
- JIT_ASSERT(sizes.size() == strides.size());
+ AT_ASSERT(sizes.size() == strides.size());
 std::vector cont(sizes.size());
 for (size_t i = 0; i < sizes.size(); ++i) {
 const auto expected_stride =
diff --git a/torch/csrc/jit/graph_executor.cpp b/torch/csrc/jit/graph_executor.cpp
index 6d4552f..a46aea1 100644
--- a/torch/csrc/jit/graph_executor.cpp
+++ b/torch/csrc/jit/graph_executor.cpp
@@ -2,12 +2,12 @@
 #include
 #include
-#include
+#include
 #include
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -101,7 +101,7 @@ struct DifferentiableGraphBackward : public autograd::Function {
 }
 executor.run(stack);
- JIT_ASSERT(stack.size() == num_outputs());
+ AT_ASSERT(stack.size() == num_outputs());
 variable_list outputs;
 outputs.reserve(num_outputs());
@@ -256,7 +256,7 @@ struct DifferentiableGraphOp {
 };
 void packGradient(Gradient gradient, Node* dnode) {
- JIT_ASSERT(dnode->kind() == prim::DifferentiableGraph);
+ AT_ASSERT(dnode->kind() == prim::DifferentiableGraph);
 dnode->g_(attr::Subgraph, gradient.f)
 ->g_(attr::ReverseSubgraph, gradient.df)
 ->i_(attr::f_real_outputs, gradient.f_real_outputs)
@@ -271,7 +271,7 @@ void packGradient(Gradient gradient, Node* dnode) {
 }
 Gradient getGradient(const Node* n) {
- JIT_ASSERT(n->kind() == prim::DifferentiableGraph);
+ AT_ASSERT(n->kind() == prim::DifferentiableGraph);
 Gradient grad;
 grad.f = n->g(attr::Subgraph);
 grad.df = n->g(attr::ReverseSubgraph);
@@ -377,7 +377,7 @@ struct GraphExecutorImpl {
 }
 std::shared_ptr graphFor(const Stack& stack) const {
- JIT_ASSERT(stack.size() >= num_inputs);
+ AT_ASSERT(stack.size() >= num_inputs);
 auto inputs = last(stack, num_inputs);
 ArgumentSpec spec(
 autograd::GradMode::is_enabled(), inputs, num_flat_inputs);
diff --git a/torch/csrc/jit/graph_node_list.h b/torch/csrc/jit/graph_node_list.h
index c20cb72..ba3a25d 100644
--- a/torch/csrc/jit/graph_node_list.h
+++ b/torch/csrc/jit/graph_node_list.h
@@ -1,6 +1,6 @@
 #pragma once
-#include
+#include
 namespace torch {
 namespace jit {
@@ -62,7 +62,7 @@ struct generic_graph_node_list_iterator {
 return cur;
 }
 generic_graph_node_list_iterator& operator++() {
- JIT_ASSERT(cur);
+ AT_ASSERT(cur);
 cur = cur->next_in_graph[d];
 return *this;
 }
@@ -72,7 +72,7 @@ struct generic_graph_node_list_iterator {
 return old;
 }
 generic_graph_node_list_iterator& operator--() {
- JIT_ASSERT(cur);
+ AT_ASSERT(cur);
 cur = cur->next_in_graph[reverseDir()];
 return *this;
 }
diff --git a/torch/csrc/jit/import.cpp b/torch/csrc/jit/import.cpp
index 729babc..817c4f9 100644
--- a/torch/csrc/jit/import.cpp
+++ b/torch/csrc/jit/import.cpp
@@ -1,7 +1,7 @@
 #include
 #include
-#include
+#include
 #include
 #include
 #include
diff --git a/torch/csrc/jit/init.cpp b/torch/csrc/jit/init.cpp
index 620ad9c..2fc292f 100644
--- a/torch/csrc/jit/init.cpp
+++ b/torch/csrc/jit/init.cpp
@@ -4,7 +4,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -43,6 +42,8 @@
 #include
+#include
+
 #include
 #include
@@ -55,6 +56,8 @@
 namespace torch {
 namespace jit {
+using ::c10::Argument;
+using ::c10::FunctionSchema;
 using caffe2::serialize::PyTorchStreamReader;
 using caffe2::serialize::PyTorchStreamWriter;
@@ -145,7 +148,7 @@ void initJITBindings(PyObject* module) {
 with_grad,
 evilDeprecatedBadCreateStackDoNotUse(inputs, graph->inputs()));
 auto graph_inputs = graph->inputs();
- JIT_ASSERT(spec.size() == graph_inputs.size());
+ AT_ASSERT(spec.size() == graph_inputs.size());
 for (size_t i = 0; i < graph_inputs.size(); ++i) {
 graph_inputs[i]->setType(spec.at(i));
 }
diff --git a/torch/csrc/jit/interned_strings.h b/torch/csrc/jit/interned_strings.h
deleted file mode 100644
index 03dabe6..0000000
--- a/torch/csrc/jit/interned_strings.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#include
-
-namespace torch {
-namespace jit {
-
-namespace prim {
-using namespace ::c10::prim;
-}
-namespace attr {
-using namespace ::c10::attr;
-}
-namespace aten {
-using namespace ::c10::aten;
-}
-namespace onnx {
-using namespace ::c10::onnx;
-}
-using ::c10::Symbol;
-} // namespace jit
-} // namespace torch
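interned_strings.h was another pure forwarding header, and its contents do not disappear: the same namespace re-exports reappear in ir.h (see the ir.h hunk below), minus the onnx namespace, which callers now spell as ::c10::onnx (as the export.cpp hunk above already does). The pattern being relied on is that a nested using namespace makes c10's interned symbols visible under the old spelling:

    #include <ATen/core/interned_strings.h>

    namespace torch {
    namespace jit {
    namespace prim { using namespace ::c10::prim; }  // prim::FusionGroup, ...
    namespace attr { using namespace ::c10::attr; }
    namespace aten { using namespace ::c10::aten; }
    using ::c10::Symbol;
    } // namespace jit
    } // namespace torch

    // An old call site keeps compiling:
    bool isFusionGroup(torch::jit::Symbol k) {
      return k == torch::jit::prim::FusionGroup;
    }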
diff --git a/torch/csrc/jit/interpreter.cpp b/torch/csrc/jit/interpreter.cpp
index 2e979b1..6d1fa9c 100644
--- a/torch/csrc/jit/interpreter.cpp
+++ b/torch/csrc/jit/interpreter.cpp
@@ -6,11 +6,11 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -234,7 +234,7 @@ std::unordered_map> findLastUses(Graph& g) {
 // In other words, we find the first program point for v that
 // _reverse_ dominates the definition of v, and add a drop point there.
 Node* same_depth_node = findOwnerInBlock(n, v->node()->owningBlock());
- JIT_ASSERT(
+ AT_ASSERT(
 same_depth_node); // failure means v is not in scope for n, use lint!
 // In the case where v and n are in the same block, just mark
@@ -379,7 +379,7 @@ struct CodeImpl {
 // jump when input is false
 void createJumpFalse(int from_inst, int to_inst) {
 auto& inst = instructions[from_inst];
- JIT_ASSERT(inst.debug_name == prim::Placeholder);
+ AT_ASSERT(inst.debug_name == prim::Placeholder);
 auto offset = relativeJump(from_inst, to_inst);
 inst.callback = [offset](Stack& stack) {
 auto t = pop(stack).toBool();
@@ -391,7 +391,7 @@ struct CodeImpl {
 // jump when input is true
 void createJumpTrue(int from_inst, int to_inst) {
 auto& inst = instructions[from_inst];
- JIT_ASSERT(inst.debug_name == prim::Placeholder);
+ AT_ASSERT(inst.debug_name == prim::Placeholder);
 auto offset = relativeJump(from_inst, to_inst);
 inst.callback = [offset](Stack& stack) {
 auto t = pop(stack).toBool();
@@ -402,7 +402,7 @@ struct CodeImpl {
 void createJump(int from_inst, int to_inst) {
 auto& inst = instructions[from_inst];
- JIT_ASSERT(inst.debug_name == prim::Placeholder);
+ AT_ASSERT(inst.debug_name == prim::Placeholder);
 auto offset = relativeJump(from_inst, to_inst);
 inst.callback = [=](Stack& stack) { return offset; };
 inst.debug_name = prim::Jump;
@@ -577,7 +577,7 @@ struct CodeImpl {
 list.size = 0;
 }
 void listInsert(ListHandle& list, int value) {
- JIT_ASSERTM(
+ AT_CHECK(
 list.start + list.size == (int)int_data.size(),
 "another list already started");
 int_data.push_back(value);
@@ -588,7 +588,7 @@ struct CodeImpl {
 list.size = 0;
 }
 void listInsert(ListHandle& list, int value) {
- JIT_ASSERTM(
+ AT_CHECK(
 list.start + list.size == (int)bool_data.size(),
 "another list already started");
 bool_data.push_back(value);
@@ -599,11 +599,11 @@ struct CodeImpl {
 void aliasRegistersTo(
 ArrayRef new_allocations,
 ArrayRef existing_allocations) {
- JIT_ASSERT(new_allocations.size() == existing_allocations.size());
+ AT_ASSERT(new_allocations.size() == existing_allocations.size());
 for (size_t i = 0; i < new_allocations.size(); ++i) {
 auto n = new_allocations[i]->unique();
 auto e = existing_allocations[i]->unique();
- JIT_ASSERT(unique_to_reg.count(e) > 0 && unique_to_reg.count(n) == 0);
+ AT_ASSERT(unique_to_reg.count(e) > 0 && unique_to_reg.count(n) == 0);
 unique_to_reg[n] = unique_to_reg[e];
 }
 }
@@ -611,7 +611,7 @@ struct CodeImpl {
 size_t u = n->unique();
 if (unique_to_reg.count(u) > 0)
 return unique_to_reg[u];
- JIT_ASSERT(!required);
+ AT_ASSERT(!required);
 int r = register_size++;
 unique_to_reg[u] = r;
 return r;
@@ -717,7 +717,7 @@ struct InterpreterStateImpl : c10::intrusive_ptr_target {
 pc = new_pc;
 } catch (Suspend& e) {
 // wait() expects a single input
- JIT_ASSERT(inst.inputs.values.size == 1);
+ AT_ASSERT(inst.inputs.values.size == 1);
 getOrCreateFuture();
diff --git a/torch/csrc/jit/interpreter.h b/torch/csrc/jit/interpreter.h
index 689e10e..85af925 100644
--- a/torch/csrc/jit/interpreter.h
+++ b/torch/csrc/jit/interpreter.h
@@ -4,7 +4,7 @@
 #include
 #include
-#include
+#include
 namespace at {
 class Tensor;
@@ -26,6 +26,8 @@ struct InterpreterStateImpl;
 struct Graph;
 struct Node;
 using Stack = std::vector;
+using c10::ivalue::Future;
+using c10::ivalue::Tuple;
 struct TORCH_API Code {
 Code() : pImpl(nullptr) {}
diff --git a/torch/csrc/jit/ir.cpp b/torch/csrc/jit/ir.cpp
index 2ef0066..9a158b6 100644
--- a/torch/csrc/jit/ir.cpp
+++ b/torch/csrc/jit/ir.cpp
@@ -1,6 +1,6 @@
 #include
-#include
+#include
 #include
 #include
 #include
@@ -302,7 +302,7 @@ static void checkSameDevice(const Node* node) {
 has_device = true;
 device = type->device();
 } else {
- JIT_ASSERT(device == type->device());
+ AT_ASSERT(device == type->device());
 }
 }
 };
@@ -340,10 +340,10 @@ void Node::lint() const {
 for (auto input : inputs_) {
 // WARNING: O(n^2)
 // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
- JIT_ASSERT(
+ AT_ASSERT(
 std::find(ALL_OF(input->uses_), Use(const_cast(this), i)) !=
 input->uses_.end());
- JIT_ASSERT(graph_->all_nodes.count(this) == 1);
+ AT_ASSERT(graph_->all_nodes.count(this) == 1);
 i++;
 }
 }
@@ -354,20 +354,20 @@ void Node::lint() const {
 // Use invariants
 // - Use is consistent with inputs
 // - Every user node is live (checked in Graph)
- JIT_ASSERT(use.user->inputs_[use.offset] == o);
+ AT_ASSERT(use.user->inputs_[use.offset] == o);
 i++;
 }
 }
 // Node subclass invariants
 IR_IF(this, Constant)
- JIT_ASSERT(inputs_.size() == 0);
+ AT_ASSERT(inputs_.size() == 0);
 IR_ELSEIF(Return)
 // Return uses is zero
- JIT_ASSERT(outputs().size() == 0);
+ AT_ASSERT(outputs().size() == 0);
 IR_ELSEIF(Param)
 // Param inputs is zero
- JIT_ASSERT(inputs_.size() == 0);
+ AT_ASSERT(inputs_.size() == 0);
 IR_ELSEIFM_CONST(PythonOp)
 // Python operator cconv is correct
 size_t n_scalars = 0, n_tensors = 0;
@@ -377,12 +377,12 @@ void Node::lint() const {
 } else if (c == 'd') {
 n_tensors++;
 } else {
- JIT_ASSERT(0);
+ AT_ASSERT(0);
 }
- JIT_ASSERT(static_cast(value->pyobj));
+ AT_ASSERT(static_cast(value->pyobj));
 }
- JIT_ASSERT(n_scalars == value->scalar_args.size());
- JIT_ASSERT(n_tensors == inputs_.size());
+ AT_ASSERT(n_scalars == value->scalar_args.size());
+ AT_ASSERT(n_tensors == inputs_.size());
 IR_ELSEIF(Eval)
 // TODO: add invariants
 // TODO: It's not good for these ops to be top-level, it makes cases longer.
@@ -419,11 +419,11 @@ void Graph::lint() const {
 return nodes.count(n) > 0 || (parent && parent->contains(n));
 }
 void insert(const Value* v) {
- JIT_ASSERT(!contains(v));
+ AT_ASSERT(!contains(v));
 values.insert(v);
 }
 void insert(const Node* n) {
- JIT_ASSERT(!contains(n));
+ AT_ASSERT(!contains(n));
 nodes.insert(n);
 }
 std::unique_ptr parent;
@@ -449,22 +449,22 @@ void Graph::lint() const {
 void check_value(const Value* v) {
 scope->insert(v);
 auto b2 = seen_uniques.insert(v->unique());
- JIT_ASSERT(b2.second); // insertion took place
- JIT_ASSERT(v->unique() < g.next_unique_);
+ AT_ASSERT(b2.second); // insertion took place
+ AT_ASSERT(v->unique() < g.next_unique_);
 for (auto use : v->uses()) {
- JIT_ASSERT(!scope->contains(use.user));
- JIT_ASSERT(g.all_nodes.count(use.user) == 1);
+ AT_ASSERT(!scope->contains(use.user));
+ AT_ASSERT(g.all_nodes.count(use.user) == 1);
 anticipated_uses[use.user]++; // int default constructs to 0
 }
 }
 void check_node(const Node* n) {
 for (auto input : n->inputs_) {
 if (!scope->contains(input)) {
- JIT_ASSERTM(0, input->unique(), " not in scope");
+ AT_ASSERTM(0, input->unique(), " not in scope");
 }
 }
- JIT_ASSERT(
+ AT_ASSERT(
 anticipated_uses[n] == static_cast(n->inputs_.size()));
 anticipated_uses[n] = -1; // we saw the anticipated user!
 scope->insert(n);
@@ -476,33 +476,33 @@ void Graph::lint() const {
 }
 size_t i = 0;
 for (auto o : n->outputs()) {
- JIT_ASSERT(o->node() == n);
- JIT_ASSERT(i++ == o->offset_);
+ AT_ASSERT(o->node() == n);
+ AT_ASSERT(i++ == o->offset_);
 check_value(o);
 }
 n->lint();
 }
 void check_block(const Block* b) {
 // Check topological ordering
- JIT_ASSERT(b->param_node()->isBefore(*b->nodes().begin()));
+ AT_ASSERT(b->param_node()->isBefore(*b->nodes().begin()));
 auto curNode = *b->nodes().begin();
 while (curNode != b->return_node()) {
- JIT_ASSERT(curNode->isBefore(curNode->next()));
+ AT_ASSERT(curNode->isBefore(curNode->next()));
 curNode = curNode->next();
 }
 for (auto input : b->inputs()) {
 check_value(input);
- JIT_ASSERT(input->node()->kind_ == prim::Param);
+ AT_ASSERT(input->node()->kind_ == prim::Param);
 }
 for (auto n : b->nodes()) {
- JIT_ASSERT(n->kind_ != prim::Param);
- JIT_ASSERT(n->kind_ != prim::Return);
+ AT_ASSERT(n->kind_ != prim::Param);
+ AT_ASSERT(n->kind_ != prim::Return);
 check_node(n);
 }
- JIT_ASSERT(b->output_->kind() == prim::Return);
+ AT_ASSERT(b->output_->kind() == prim::Return);
 check_node(b->output_);
 // all_nodes
@@ -516,9 +516,9 @@ void Graph::lint() const {
 node_set output_set{b->output_};
 // TODO: Make a more type safe std::includes wrapper which disallows use
 // on non-ordered containers
- JIT_ASSERT(std::includes(ALL_OF(all_nodes_set), ALL_OF(nodes_set)));
- JIT_ASSERT(std::includes(ALL_OF(all_nodes_set), ALL_OF(inputs_set)));
- JIT_ASSERT(std::includes(ALL_OF(all_nodes_set), ALL_OF(output_set)));
+ AT_ASSERT(std::includes(ALL_OF(all_nodes_set), ALL_OF(nodes_set)));
+ AT_ASSERT(std::includes(ALL_OF(all_nodes_set), ALL_OF(inputs_set)));
+ AT_ASSERT(std::includes(ALL_OF(all_nodes_set), ALL_OF(output_set)));
 sum_set.insert(ALL_OF(nodes_set));
 sum_set.insert(ALL_OF(inputs_set));
@@ -530,9 +530,9 @@ void Graph::lint() const {
 check_block(g.block_);
 for (auto kv : anticipated_uses) {
- JIT_ASSERT(kv.second == -1);
+ AT_ASSERT(kv.second == -1);
 }
- JIT_ASSERT(std::includes(ALL_OF(sum_set), ALL_OF(all_nodes_set)));
+ AT_ASSERT(std::includes(ALL_OF(sum_set), ALL_OF(all_nodes_set)));
 }
 };
 LintImpl(*this).check_graph();
 }
@@ -690,7 +690,7 @@ Value* Value::copyMetadata(Value* from) {
 }
 void Value::replaceFirstUseWith(Value* newValue) {
- JIT_ASSERT(owningGraph() == newValue->owningGraph());
+ AT_ASSERT(owningGraph() == newValue->owningGraph());
 auto u = uses()[0];
 u.user->inputs_[u.offset] = newValue;
 newValue->uses_.push_back(u);
@@ -869,8 +869,8 @@ Node::Node(Graph* graph_, NodeKind kind_)
 }
 void Node::eraseOutput(size_t i) {
- JIT_ASSERT(i < outputs_.size());
- JIT_ASSERT(outputs_[i]->uses().empty());
+ AT_ASSERT(i < outputs_.size());
+ AT_ASSERT(outputs_[i]->uses().empty());
 schema_ = nullptr;
 Value* n = outputs_[i];
 outputs_.erase(outputs_.begin() + i);
@@ -887,7 +887,7 @@ Block* Node::addBlock() {
 }
 void Node::eraseBlock(size_t i) {
- JIT_ASSERT(i < blocks_.size());
+ AT_ASSERT(i < blocks_.size());
 schema_ = nullptr;
 Block* n = blocks_[i];
 blocks_.erase(blocks_.begin() + i);
@@ -913,7 +913,7 @@ void Node::cloneFrom(Node* s) {
 }
 void Node::replaceAllUsesWith(Node* n) {
- JIT_ASSERT(outputs().size() == n->outputs().size());
+ AT_ASSERT(outputs().size() == n->outputs().size());
 size_t nOutputs = outputs().size();
 for (size_t i = 0; i < nOutputs; i++) {
 outputs()[i]->replaceAllUsesWith(n->outputs()[i]);
 }
@@ -921,7 +921,7 @@ void Node::replaceAllUsesWith(Node* n) {
 Value* Node::insertInput(size_t i, Value* value) {
- JIT_ASSERT(graph_ == value->owningGraph());
+ AT_ASSERT(graph_ == value->owningGraph());
 schema_ = nullptr;
 // First we update the offsets for all existing inputs that will reside
 // after the one we're inserting. Concretely, these are the inputs at
@@ -940,7 +940,7 @@ Value* Node::insertInput(size_t i, Value* value) {
 }
 Value* Node::addInput(Value* value) {
- JIT_ASSERT(graph_ == value->owningGraph());
+ AT_ASSERT(graph_ == value->owningGraph());
 schema_ = nullptr;
 value->uses_.emplace_back(this, inputs_.size());
 inputs_.push_back(value);
@@ -948,7 +948,7 @@ Value* Node::addInput(Value* value) {
 }
 Value* Node::replaceInput(size_t i, Value* newValue) {
- JIT_ASSERT(newValue->owningGraph() == graph_);
+ AT_ASSERT(newValue->owningGraph() == graph_);
 schema_ = nullptr;
 Value* old = dropInput(i);
 inputs_[i] = newValue;
@@ -957,8 +957,8 @@ Value* Node::replaceInput(size_t i, Value* newValue) {
 }
 void Node::replaceInputWith(Value* from, Value* to) {
- JIT_ASSERT(from->owningGraph() == graph_);
- JIT_ASSERT(to->owningGraph() == graph_);
+ AT_ASSERT(from->owningGraph() == graph_);
+ AT_ASSERT(to->owningGraph() == graph_);
 schema_ = nullptr;
 size_t i = 0;
 for (auto input : inputs()) {
@@ -993,7 +993,7 @@ bool Node::isBeforeOrAfter(const Node* n, MoveSide moveSide) const {
 return this->topo_position_ > n->topo_position_;
 }
- JIT_ASSERT(this == n);
+ AT_ASSERT(this == n);
 return false;
 }
@@ -1001,7 +1001,7 @@ bool Node::isBeforeOrAfter(const Node* n, MoveSide moveSide) const {
 // until we find the first common block.
 auto lhs = this;
 while (lhs) {
- JIT_ASSERT(lhs->owningBlock());
+ AT_ASSERT(lhs->owningBlock());
 auto rhs = n;
 while (rhs) {
@@ -1018,7 +1018,7 @@ bool Node::isBeforeOrAfter(const Node* n, MoveSide moveSide) const {
 lhs = lhs->owningBlock()->owningNode();
 }
 // should never reach here, since both nodes are ultimately in the same graph
- JIT_ASSERT(false);
+ AT_ASSERT(false);
 }
 bool Node::isBefore(const Node* n) const {
@@ -1030,14 +1030,14 @@ bool Node::isAfter(const Node* n) const {
 }
 Node* Node::insertBefore(Node* n) {
- JIT_ASSERT(n->inBlockList());
+ AT_ASSERT(n->inBlockList());
 insertAfter(n->prev());
 return this;
 }
 Node* Node::insertAfter(Node* n) {
- JIT_ASSERT(!inBlockList() && n->inBlockList());
- JIT_ASSERT(n->owningBlock());
+ AT_ASSERT(!inBlockList() && n->inBlockList());
+ AT_ASSERT(n->owningBlock());
 this->owning_block_ = n->owningBlock();
 Node* next = n->next();
 n->next() = this;
@@ -1082,12 +1082,12 @@ use_list::iterator Node::findUseForInput(size_t i) {
 // O(N) on the use list, but unless we get nodes with +100 uses
 // vector traversal still is probably faster than linked list
 auto use_it = std::find(input_uses.begin(), input_uses.end(), Use(this, i));
- JIT_ASSERT(use_it != input_uses.end());
+ AT_ASSERT(use_it != input_uses.end());
 return use_it;
 }
 Value* Node::dropInput(size_t i) {
- JIT_ASSERT(i < inputs_.size());
+ AT_ASSERT(i < inputs_.size());
 auto input_node = inputs_[i];
 auto use_it = findUseForInput(i);
 input_node->uses_.erase(use_it);
@@ -1096,7 +1096,7 @@ Value* Node::dropInput(size_t i) {
 }
 void Node::removeFromList() {
- JIT_ASSERT(inBlockList());
+ AT_ASSERT(inBlockList());
 this->owning_block_ = nullptr;
 Node* next = this->next();
 Node* prev = this->prev();
@@ -1203,7 +1203,7 @@ Node* Graph::createTupleSlice(Value* tup, int64_t beg, int64_t end) {
 Node* Graph::createList(const TypePtr& elem_type, at::ArrayRef values) {
 auto n = create(prim::ListConstruct, values);
 for (const auto& v : values) {
- JIT_ASSERT(v->type()->isSubtypeOf(elem_type));
+ AT_ASSERT(v->type()->isSubtypeOf(elem_type));
 }
 n->output()->setType(ListType::create(elem_type));
 return n;
@@ -1277,20 +1277,20 @@ Graph::~Graph() {
 void Graph::freeNode(Node* n) {
 auto it = all_nodes.find(n);
- JIT_ASSERT(it != all_nodes.end());
+ AT_ASSERT(it != all_nodes.end());
 delete *it;
 all_nodes.erase(it);
 }
 void Graph::freeValue(Value* v) {
 v->setUniqueName("");
 auto it = all_values.find(v);
- JIT_ASSERT(it != all_values.end());
+ AT_ASSERT(it != all_values.end());
 delete *it;
 all_values.erase(it);
 }
 void Graph::freeBlock(Block* b) {
 auto it = all_blocks.find(b);
- JIT_ASSERT(it != all_blocks.end());
+ AT_ASSERT(it != all_blocks.end());
 delete *it;
 all_blocks.erase(it);
 }
@@ -1311,7 +1311,7 @@ std::vector inlineCallTo(
 bool unpack_outputs) {
 std::unordered_map value_map;
 auto value_map_func = [&](Value* v) { return value_map.at(v); };
- JIT_ASSERT(callee.inputs().size() == inputs.size());
+ AT_ASSERT(callee.inputs().size() == inputs.size());
 for (size_t i = 0; i < inputs.size(); ++i) {
 value_map[callee.inputs()[i]] = inputs[i];
 }
diff --git a/torch/csrc/jit/ir.h b/torch/csrc/jit/ir.h
index ad829bd..a36031f 100644
--- a/torch/csrc/jit/ir.h
+++ b/torch/csrc/jit/ir.h
@@ -1,17 +1,11 @@
 #pragma once
-#include
 #include
-#include
-#include
 #include
 #include
-#include
-#include
 #include
 #include
 #include
-#include
 #include
 #include
@@ -19,7 +13,12 @@
 #include
 #include
+#include
+#include
+#include
+#include
 #include
+#include
 #include
 #include
@@ -41,6 +40,51 @@ struct Function;
 namespace torch {
 namespace jit {
+using ::c10::Symbol;
+using ::c10::Argument;
+using ::c10::FunctionSchema;
+
+using ::c10::ivalue::List;
+using ::c10::ivalue::Shared;
+
+using ::c10::IValue;
+using ::c10::ivalue::Future;
+using ::c10::ivalue::Tuple;
+
+using ::c10::ivalue::BoolList;
+using ::c10::ivalue::DoubleList;
+using ::c10::ivalue::GenericList;
+using ::c10::ivalue::IntList;
+using ::c10::ivalue::TensorList;
+
+using ::c10::ivalue::ConstantString;
+
+#define C10_USING(T) using ::c10::T;
+C10_FORALL_TYPES(C10_USING)
+#undef C10_USING
+
+#define C10_USING(T) using ::c10::T##Ptr;
+C10_FORALL_TYPES(C10_USING)
+#undef C10_USING
+
+using ::c10::Type;
+using ::c10::TypeEnv;
+using ::c10::TypePtr;
+
+using ::c10::getTypePtr;
+using ::c10::MatchTypeReturn;
+using ::c10::TypeKind;
+
+namespace prim {
+using namespace ::c10::prim;
+}
+namespace attr {
+using namespace ::c10::attr;
+}
+namespace aten {
+using namespace ::c10::aten;
+}
+
 // Graph represents one "function" of computation.
 // It uses a simple ownership model where the graph owns all the nodes inside
 // it. All references inside the graph are raw pointers. Destroying the Graph
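The only non-obvious lines in the new ir.h block are the two C10_USING passes. C10_FORALL_TYPES is an X-macro: it applies the macro it is given to every type kind in c10, so the first pass generates a using-declaration per type and the second one per TypePtr alias. Schematically, with a two-entry stand-in for c10's real type list (an assumption for brevity):

    // Stand-in for C10_FORALL_TYPES, which enumerates every c10 type kind.
    #define DEMO_FORALL_TYPES(_) _(TensorType) _(ListType)

    #define C10_USING(T) using ::c10::T;
    DEMO_FORALL_TYPES(C10_USING)
    // expands to: using ::c10::TensorType; using ::c10::ListType;
    #undef C10_USING

    #define C10_USING(T) using ::c10::T##Ptr;
    DEMO_FORALL_TYPES(C10_USING)
    // expands to: using ::c10::TensorTypePtr; using ::c10::ListTypePtr;
    #undef C10_USING

One #define/#undef pair per pass keeps the helper macro from leaking out of the header.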
@@ -551,7 +595,7 @@ struct Node { } template T* expect() { - JIT_ASSERTM( + AT_CHECK( T::Kind == kind(), "expected a ", T::Kind.toDisplayString(), @@ -589,21 +633,21 @@ struct Node { } } bool hasAttribute(Symbol name) const { - JIT_ASSERT(name.is_attr()); + AT_ASSERT(name.is_attr()); return findAttr(name, false) != values_.end(); } bool hasAttributeS(const std::string& name) const { return hasAttribute(Symbol::attr(name)); } AttributeKind kindOf(Symbol name) const { - JIT_ASSERT(name.is_attr()); + AT_ASSERT(name.is_attr()); return (*findAttr(name, true))->kind(); } AttributeKind kindOfS(const std::string& name) const { return kindOf(Symbol::attr(name)); } Node* removeAttribute(Symbol name) { - JIT_ASSERT(name.is_attr()); + AT_ASSERT(name.is_attr()); values_.erase(findAttr(name, true)); return this; } @@ -658,7 +702,7 @@ struct Node { // does not use CREATE_ACCESSOR because we need additional asserts Node* t_(Symbol name, TensorAttr::ConstructorType v) { - JIT_ASSERT(!v.defined() || !v.is_variable()); + AT_ASSERT(!v.defined() || !v.is_variable()); return setAttr(name, std::forward(v)); } const TensorAttr::ValueType& t(Symbol name) const { @@ -667,7 +711,7 @@ struct Node { Node* ts_(Symbol name, TensorsAttr::ConstructorType v) { for (auto& t : v) { - JIT_ASSERT(!t.defined() || !t.is_variable()); + AT_ASSERT(!t.defined() || !t.is_variable()); } return setAttr( name, std::forward(v)); @@ -682,7 +726,7 @@ struct Node { template Node* setAttr(Symbol name, typename T::ConstructorType v) { - JIT_ASSERT(name.is_attr()); + AT_ASSERT(name.is_attr()); auto it = findAttr(name, false); auto nv = AVPtr(new T(name, std::forward(v))); if (it == values_.end()) { @@ -694,7 +738,7 @@ struct Node { } template typename T::ValueType& getAttr(Symbol name) const { - JIT_ASSERT(name.is_attr()); + AT_ASSERT(name.is_attr()); auto it = findAttr(name, true); auto* child = dynamic_cast(it->get()); if (child == nullptr) { @@ -708,25 +752,25 @@ struct Node { // a big pile of messages. 
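For context on the expect() hunk above: the template header renders without its parameters in this copy of the patch. A sketch of the full member, assuming the stripped header is the conventional template <typename T> and reconstructing the tail of the message from its call sites:

    template <typename T>
    T* expect() {
      AT_CHECK(
          T::Kind == kind(),
          "expected a ", T::Kind.toDisplayString(),
          " but found a ", kind().toDisplayString());
      return static_cast<T*>(this);
    }

JIT_ASSERTM's message arguments carry over to AT_CHECK unchanged, which is why these rewrites are one-for-one.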
std::vector values_; std::vector::iterator findAttr(Symbol name, bool required) { - JIT_ASSERT(name.is_attr()); + AT_ASSERT(name.is_attr()); auto it = std::find_if(values_.begin(), values_.end(), [&](const AVPtr& v) { return v->name == name; }); if (required && it == values_.end()) { throw AttributeError(name, false); } - JIT_ASSERT(!required || it != values_.end()); + AT_ASSERT(!required || it != values_.end()); return it; } std::vector::const_iterator findAttr(Symbol name, bool required) const { - JIT_ASSERT(name.is_attr()); + AT_ASSERT(name.is_attr()); auto it = std::find_if(values_.begin(), values_.end(), [&](const AVPtr& v) { return v->name == name; }); if (required && it == values_.end()) { throw AttributeError(name, false); } - JIT_ASSERT(!required || it != values_.end()); + AT_ASSERT(!required || it != values_.end()); return it; } @@ -746,7 +790,7 @@ struct Node { bool inBlockList() const { if (next() == nullptr) { - JIT_ASSERT(prev() == nullptr); + AT_ASSERT(prev() == nullptr); } return next() != nullptr; } @@ -834,13 +878,13 @@ struct Block { output_->removeInput(i); } Node* appendNode(Node* n) { - JIT_ASSERT(n->graph_ == graph_ && !n->inBlockList()); + AT_ASSERT(n->graph_ == graph_ && !n->inBlockList()); n->insertBefore(output_); return n; } Node* prependNode(Node* n) { - JIT_ASSERT(n->graph_ == graph_ && !n->inBlockList()); + AT_ASSERT(n->graph_ == graph_ && !n->inBlockList()); n->insertAfter(output_); return n; } @@ -1051,21 +1095,21 @@ struct Graph { // initialized to insert at the end of the top level block // can be changed with setInsertPoint() Node* insertNode(Node* n) { - JIT_ASSERT( + AT_ASSERT( insert_before_->inBlockList() && "insert point node is no longer in a block list"); return n->insertBefore(insert_before_); } // set where nodes are inserted to append to the end of this block void setInsertPoint(Block* b) { - JIT_ASSERT(b->owningGraph() == this); + AT_ASSERT(b->owningGraph() == this); insert_before_ = b->return_node(); } // set where nodes are inserted to insert _before_ this node // for implementation simplicity we only support inserting before a node for // now void setInsertPoint(Node* n) { - JIT_ASSERT(n->owningGraph() == this && n->inBlockList()); + AT_ASSERT(n->owningGraph() == this && n->inBlockList()); insert_before_ = n; } Node* insertPoint() { @@ -1134,7 +1178,7 @@ inline Value::Value(Node* node_, size_t offset_) } inline Value* Value::setType(TypePtr type) { - JIT_ASSERT(type); + AT_ASSERT(type); type_ = std::move(type); for (Use& use : uses_) { use.user->schema_ = nullptr; @@ -1198,9 +1242,9 @@ inline const Graph* Value::owningGraph() const { // execute a Python function, used for Ops we can't optimize but that we want to // optimize around struct PythonOp : public Node { - static constexpr Symbol Kind = prim::PythonOp; + static constexpr Symbol Kind = ::c10::prim::PythonOp; - PythonOp(Graph* graph) : Node(graph, prim::PythonOp) {} + PythonOp(Graph* graph) : Node(graph, ::c10::prim::PythonOp) {} PythonOp* init( THPObjectPtr&& pyobj, const std::string& cconv, diff --git a/torch/csrc/jit/ir_views.h b/torch/csrc/jit/ir_views.h index 0c569cd..7bc1e1c 100644 --- a/torch/csrc/jit/ir_views.h +++ b/torch/csrc/jit/ir_views.h @@ -5,7 +5,7 @@ namespace jit { struct IfView { explicit IfView(Node* node) : node_(node) { - JIT_ASSERT(node->kind() == prim::If); + AT_ASSERT(node->kind() == ::c10::prim::If); } Value* cond() const { return node_->input(0); @@ -38,7 +38,8 @@ struct IfView { struct LoopView { explicit LoopView(Node* node) : node_(node) { - 
JIT_ASSERT(node->kind() == prim::Loop || node->kind() == onnx::Loop); + AT_ASSERT( + node->kind() == ::c10::prim::Loop || node->kind() == ::c10::onnx::Loop); } Block* bodyBlock() const { return node_->blocks().at(0); diff --git a/torch/csrc/jit/ivalue.h b/torch/csrc/jit/ivalue.h deleted file mode 100644 index d3130db..0000000 --- a/torch/csrc/jit/ivalue.h +++ /dev/null @@ -1,23 +0,0 @@ -#pragma once -#include - -namespace torch { -namespace jit { - -using ::c10::ivalue::List; -using ::c10::ivalue::Shared; - -using ::c10::IValue; -using ::c10::ivalue::Future; -using ::c10::ivalue::Tuple; - -using ::c10::ivalue::BoolList; -using ::c10::ivalue::DoubleList; -using ::c10::ivalue::GenericList; -using ::c10::ivalue::IntList; -using ::c10::ivalue::TensorList; - -using ::c10::ivalue::ConstantString; - -} // namespace jit -} // namespace torch diff --git a/torch/csrc/jit/named_value.h b/torch/csrc/jit/named_value.h index 983e08f..0d70660 100644 --- a/torch/csrc/jit/named_value.h +++ b/torch/csrc/jit/named_value.h @@ -1,7 +1,7 @@ #pragma once #include #include -#include +#include #include #include @@ -57,7 +57,7 @@ struct NamedValue { } const std::string& name() const { - JIT_ASSERT(name_); + AT_ASSERT(name_); return *name_; } diff --git a/torch/csrc/jit/node_hashing.cpp b/torch/csrc/jit/node_hashing.cpp index 5c32cca..04107f8 100644 --- a/torch/csrc/jit/node_hashing.cpp +++ b/torch/csrc/jit/node_hashing.cpp @@ -3,8 +3,8 @@ #include #include -#include -#include +#include +#include #include #include #include @@ -31,8 +31,8 @@ bool tensorListEqual( // This function may be too conservative for general use. // Do NOT support g/gs attributes. bool attributesEqualCSE(const Node* lhs, const Node* rhs) { - JIT_ASSERT(lhs != nullptr); - JIT_ASSERT(rhs != nullptr); + AT_ASSERT(lhs != nullptr); + AT_ASSERT(rhs != nullptr); // One has attributes, the other does not. 
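The deletion above removes a pure forwarding header: torch/csrc/jit/ivalue.h contained nothing but the using-declarations now hosted in ir.h, so callers switch to the defining header. Sketched as the include change a typical client makes (the client file is hypothetical):

    // before: IValue reached through the jit forwarding header
    #include <torch/csrc/jit/ivalue.h>

    // after: include the ATen header directly, per this patch's title;
    // code inside torch::jit still spells the type IValue, because ir.h
    // re-exports ::c10::IValue into that namespace.
    #include <ATen/core/ivalue.h>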
if (lhs->hasAttributes() != rhs->hasAttributes()) return false; @@ -88,7 +88,7 @@ bool attributesEqualCSE(const Node* lhs, const Node* rhs) { } // anonymous namespace size_t HashNode::operator()(const Node* k) const { - JIT_ASSERT(k != nullptr); + AT_ASSERT(k != nullptr); return get_hash( k->kind(), fmap(k->outputs(), [](const Value* v) { return v->type()->kind(); }), diff --git a/torch/csrc/jit/operator.cpp b/torch/csrc/jit/operator.cpp index 50c0bbb..f7e40a8 100644 --- a/torch/csrc/jit/operator.cpp +++ b/torch/csrc/jit/operator.cpp @@ -418,7 +418,7 @@ struct OperatorRegistry { } } #endif - JIT_ASSERTM( + AT_CHECK( op_ptr_it != operators_by_sig.end(), "Couldn't find an operator for ", name); diff --git a/torch/csrc/jit/operator.h b/torch/csrc/jit/operator.h index 928b468..58b7e33 100644 --- a/torch/csrc/jit/operator.h +++ b/torch/csrc/jit/operator.h @@ -3,12 +3,12 @@ // it now to implement correct semantic checking for script #pragma once -#include -#include +#include #include #include #include +#include #include #include @@ -21,6 +21,8 @@ namespace torch { namespace jit { +using ::c10::FunctionSchema; + TORCH_API FunctionSchema parseSchema(const std::string& schema); using OperationCreator = std::function; diff --git a/torch/csrc/jit/passes/alias_analysis.cpp b/torch/csrc/jit/passes/alias_analysis.cpp index c98929c..3d401c6 100644 --- a/torch/csrc/jit/passes/alias_analysis.cpp +++ b/torch/csrc/jit/passes/alias_analysis.cpp @@ -31,7 +31,7 @@ AliasDb::AliasDb(std::shared_ptr graph) : graph_(std::move(graph)) { const auto value = pr.first; const auto& aliasInfo = pr.second; // We don't support composite types yet - JIT_ASSERT(aliasInfo.containedTypes().size() == 0); + AT_ASSERT(aliasInfo.containedTypes().size() == 0); for (const auto aliasSet : aliasInfo.sets()) { aliasToValue_[aliasSet].insert(value); } @@ -67,7 +67,7 @@ bool AliasDb::writesTo(Node* n, const Value* v) const { } const auto& aliasInfo = valueToAlias_.at(v); - JIT_ASSERT(aliasInfo.sets().size() > 0); + AT_ASSERT(aliasInfo.sets().size() > 0); // We only need to check one alias set, since if this value belongs to // multiple alias sets they are all written to const auto& aliasSet = *aliasInfo.sets().begin(); @@ -288,7 +288,7 @@ void AliasDb::analyze(const std::shared_ptr& graph) { } addAlias(input, tupleTypeAliases.at(tupleType)); } else { - JIT_ASSERT(!shouldAnnotate(input)); + AT_ASSERT(!shouldAnnotate(input)); } } @@ -388,10 +388,10 @@ void AliasDb::analyze(Node* node) { } // We don't support composite types for alias analysis yet. - JIT_ASSERT(formal->containedTypes().size() == 0); + AT_ASSERT(formal->containedTypes().size() == 0); // TODO neither unions nor wildcards make sense on an input. We should // disallow them in function schema - JIT_ASSERT(!formal->isWildcard()) + AT_ASSERT(!formal->isWildcard()) const auto& formalAlias = formal->set(); // skip if we've already bound this alias @@ -428,7 +428,7 @@ void AliasDb::analyze(Node* node) { } // We don't support composite types for alias analysis yet. 
- JIT_ASSERT(formal->containedTypes().size() == 0); + AT_ASSERT(formal->containedTypes().size() == 0); const auto& formalAlias = formal->set(); auto outputAlias = formalToActual.at(formalAlias); @@ -472,8 +472,8 @@ void AliasDb::analyzeLoop(Node* node) { const auto loopCarriedInputs = node->inputs().slice(2); // skip max, cond const auto blockInputs = bodyBlock->inputs().slice(1); // skip trip const auto blockOutputs = bodyBlock->outputs().slice(1); // skip trip - JIT_ASSERT(loopCarriedInputs.size() == blockInputs.size()); - JIT_ASSERT(blockOutputs.size() == node->outputs().size()); + AT_ASSERT(loopCarriedInputs.size() == blockInputs.size()); + AT_ASSERT(blockOutputs.size() == node->outputs().size()); // Run alias analysis on the loop body, iterating until the block output // alias info converges. @@ -496,7 +496,7 @@ void AliasDb::analyzeLoop(Node* node) { // Check whether or not this would change anything if (valueToAlias_.count(input) != 0) { - JIT_ASSERT(valueToAlias_.count(output) != 0) + AT_ASSERT(valueToAlias_.count(output) != 0) if (!valueToAlias_[output].isSubsetOf(valueToAlias_[input])) { notConverged = true; } @@ -519,7 +519,7 @@ void AliasDb::analyzeSubgraph(Node* node) { // TODO(suo): the subgraph outputs and node outputs are NOT NECESSARILY the // same length. Autodifferentiation maybe capture additional outputs in the // subgraph block. - JIT_ASSERT(subgraphBlock->outputs().size() >= node->outputs().size()); + AT_ASSERT(subgraphBlock->outputs().size() >= node->outputs().size()); for (size_t i = 0; i < node->outputs().size(); i++) { addAlias(node->outputs()[i], subgraphBlock->outputs()[i]); } @@ -605,14 +605,14 @@ void AliasDb::addAlias(const Value* value, Symbol alias) { // Union the alias info of `value` with `from` void AliasDb::addAlias(const Value* value, const Value* from) { if (!shouldAnnotate(value)) { - JIT_ASSERT(!shouldAnnotate(from)); + AT_ASSERT(!shouldAnnotate(from)); return; } addAlias(value, valueToAlias_.at(from)); } void AliasDb::mapAliases(at::ArrayRef to, at::ArrayRef from) { - JIT_ASSERT(to.size() == from.size()); + AT_ASSERT(to.size() == from.size()); for (size_t i = 0; i < to.size(); i++) { addAlias(to[i], from[i]); } @@ -792,7 +792,7 @@ class AliasDb::WorkingSet { // outside), then return nullptr. Since we can only reorder nodes within a // block, `target` would be irrelevant. static Node* findSameBlock(Node* target, Node* n) { - JIT_ASSERT(target->owningGraph() == n->owningGraph()); + AT_ASSERT(target->owningGraph() == n->owningGraph()); if (target->owningBlock() == n->owningBlock()) { return target; } else { @@ -833,7 +833,7 @@ bool AliasDb::tryMove( Node* movePoint, MoveSide moveSide, bool dryRun) { - JIT_ASSERT(toMove->owningBlock() == movePoint->owningBlock()); + AT_ASSERT(toMove->owningBlock() == movePoint->owningBlock()); if (toMove == movePoint) { return true; } @@ -897,7 +897,7 @@ bool AliasDb::tryMove( } // 3. 
Execute the move - JIT_ASSERT(curNode == movePoint); + AT_ASSERT(curNode == movePoint); if (splitToMoveAndDeps) { // Move `toMove` move(toMove, movePoint, moveSide); @@ -977,7 +977,7 @@ bool AliasDb::isBeforeSameGraph(const Node* a, const Node* b) const { } lhs = subgraphToOwner_.at(lhs->owningGraph()); } - JIT_ASSERT(false); + AT_ASSERT(false); } } // namespace jit } // namespace torch diff --git a/torch/csrc/jit/passes/batch_mm.cpp b/torch/csrc/jit/passes/batch_mm.cpp index 3f1693b..bad7d72 100644 --- a/torch/csrc/jit/passes/batch_mm.cpp +++ b/torch/csrc/jit/passes/batch_mm.cpp @@ -1,9 +1,9 @@ #include -#include +#include #include #include -#include +#include #include #include #include @@ -102,8 +102,8 @@ RegisterOperators mm_tree_reduction_reg( } drop(stack, num_inputs); - JIT_ASSERT(inputs.size() > 0); - JIT_ASSERT(inputs.size() % 2 == 0); + AT_ASSERT(inputs.size() > 0); + AT_ASSERT(inputs.size() % 2 == 0); size_t side_num_elems = inputs.size() / 2; auto lhs_inputs = at::TensorList(inputs).slice(0, side_num_elems); auto rhs_inputs = at::TensorList(inputs).slice(side_num_elems); @@ -188,7 +188,7 @@ struct TreeToken { matmuls.push_back(n); } else if (n->matches("aten::t(Tensor self) -> Tensor")) { Node* input_node = n->input()->node(); - JIT_ASSERT(input_node->matches( + AT_ASSERT(input_node->matches( "aten::mm(Tensor self, Tensor mat2) -> Tensor")); // (AB)^T == B^TA^T WithInsertPoint insert_guard{input_node}; @@ -374,10 +374,10 @@ void BatchMMSide(Block* block, AliasDb& alias_db) { // NB: 8 is the current loop unrolling factor static constexpr size_t how_many_is_many = 8; const auto batch_side = [&](std::vector& mms, Side side) { - JIT_ASSERT(!mms.empty()); + AT_ASSERT(!mms.empty()); for (int64_t i = static_cast(mms.size()) - 2; i >= 0; --i) { bool move_ok = alias_db.moveBeforeTopologicallyValid(mms[i], mms[i + 1]); - JIT_ASSERT(move_ok); + AT_ASSERT(move_ok); } WithInsertPoint insert_guard{mms[0]}; Graph* graph = mms[0]->owningGraph(); diff --git a/torch/csrc/jit/passes/common_subexpression_elimination.cpp b/torch/csrc/jit/passes/common_subexpression_elimination.cpp index f2be647..dc4bfc9 100644 --- a/torch/csrc/jit/passes/common_subexpression_elimination.cpp +++ b/torch/csrc/jit/passes/common_subexpression_elimination.cpp @@ -3,8 +3,8 @@ #include #include -#include -#include +#include +#include #include #include #include diff --git a/torch/csrc/jit/passes/constant_pooling.cpp b/torch/csrc/jit/passes/constant_pooling.cpp index 8c08eb7..ab70489 100644 --- a/torch/csrc/jit/passes/constant_pooling.cpp +++ b/torch/csrc/jit/passes/constant_pooling.cpp @@ -1,4 +1,4 @@ -#include +#include #include #include #include diff --git a/torch/csrc/jit/passes/constant_propagation.cpp b/torch/csrc/jit/passes/constant_propagation.cpp index f580a94..ddadbdb 100644 --- a/torch/csrc/jit/passes/constant_propagation.cpp +++ b/torch/csrc/jit/passes/constant_propagation.cpp @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include #include @@ -116,7 +116,7 @@ void inlineIfBody(Block* body) { void inlineIf(Node* n, const AliasDb& aliasDb) { auto input_bool = constant_as(n->input()); - JIT_ASSERT(input_bool); + AT_ASSERT(input_bool); size_t block_index = *input_bool ? 
0 : 1; ConstantPropagation(n->blocks().at(block_index), aliasDb); inlineIfBody(n->blocks().at(block_index)); @@ -124,7 +124,7 @@ void inlineIf(Node* n, const AliasDb& aliasDb) { // remove extra outputs from the node bool removeExtraIfOutputs(Node* n) { - JIT_ASSERTM(n->kind() == prim::If, "Only supported for If nodes"); + AT_CHECK(n->kind() == prim::If, "Only supported for If nodes"); auto true_block = n->blocks()[0]; auto false_block = n->blocks()[1]; auto initial_outputs = true_block->outputs().size(); diff --git a/torch/csrc/jit/passes/create_autodiff_subgraphs.cpp b/torch/csrc/jit/passes/create_autodiff_subgraphs.cpp index 292a491..16e2363 100644 --- a/torch/csrc/jit/passes/create_autodiff_subgraphs.cpp +++ b/torch/csrc/jit/passes/create_autodiff_subgraphs.cpp @@ -1,6 +1,6 @@ #include -#include +#include #include #include #include @@ -82,7 +82,7 @@ class SubgraphSlicer { // // Returns true if an inlining has occured, false otherwise. bool inlineIfTooSmall(Node* n) { - JIT_ASSERT(n->kind() == prim::DifferentiableGraph); + AT_ASSERT(n->kind() == prim::DifferentiableGraph); auto subgraph = SubgraphUtils::getSubgraph(n); size_t i = 0; for (auto it = subgraph->nodes().begin(); it != subgraph->nodes().end(); @@ -148,7 +148,7 @@ class SubgraphSlicer { Node* consumer, Node* producer, AliasDb& aliasDb) { - JIT_ASSERT(consumer->kind() == prim::DifferentiableGraph); + AT_ASSERT(consumer->kind() == prim::DifferentiableGraph); bool canMerge = shouldConsiderForMerge(producer) && aliasDb.moveBeforeTopologicallyValid(producer, consumer); diff --git a/torch/csrc/jit/passes/dead_code_elimination.cpp b/torch/csrc/jit/passes/dead_code_elimination.cpp index d43694b..238e6db 100644 --- a/torch/csrc/jit/passes/dead_code_elimination.cpp +++ b/torch/csrc/jit/passes/dead_code_elimination.cpp @@ -8,6 +8,10 @@ namespace torch { namespace jit { +namespace prim { +using namespace ::c10::prim; +} + class DeadCodeEliminator { public: explicit DeadCodeEliminator(std::shared_ptr graph) @@ -56,7 +60,7 @@ class DeadCodeEliminator { return; } - JIT_ASSERT(node->owningBlock()->return_node() == node); + AT_ASSERT(node->owningBlock()->return_node() == node); auto outerNode = node->owningBlock()->owningNode(); if (outerNode == nullptr || outerNode->kind() == prim::Reverse) { // If there's no outer node, we're looking at the graph's top-level @@ -66,7 +70,8 @@ class DeadCodeEliminator { } // Collect all inputs that are actually live - if (outerNode->kind() == prim::Loop || outerNode->kind() == onnx::Loop) { + if (outerNode->kind() == prim::Loop || + outerNode->kind() == c10::onnx::Loop) { // Special handling to deal with loop carried dependencies. auto loop = LoopView(outerNode); for (size_t i = 0; i < loop.carriedOutputs().size(); i++) { @@ -82,7 +87,7 @@ class DeadCodeEliminator { // the loop body. 
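The namespace block added to dead_code_elimination.cpp above deserves a gloss: re-opening a local namespace and importing the c10 one keeps every unqualified prim::Foo reference in the file compiling after the symbols moved. A self-contained miniature with hypothetical namespaces:

    namespace upstream { namespace sym { constexpr int Loop = 1; } }

    namespace consumer {
    // Re-open the local name and import the upstream namespace wholesale...
    namespace sym { using namespace ::upstream::sym; }

    // ...so existing unqualified uses keep resolving:
    int k = sym::Loop;
    } // namespace consumer

The same trick appears below for onnx in peephole.cpp and for prim again in shape_analysis.cpp.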
liveValues_.insert(loop.nextCond()); } else { - JIT_ASSERT(outerNode->outputs().size() == node->inputs().size()); + AT_ASSERT(outerNode->outputs().size() == node->inputs().size()); for (size_t i = 0; i < outerNode->outputs().size(); i++) { auto innerOutput = node->inputs()[i]; auto outerOutput = outerNode->outputs()[i]; diff --git a/torch/csrc/jit/passes/graph_fuser.cpp b/torch/csrc/jit/passes/graph_fuser.cpp index d59ce74..3d12c1b 100644 --- a/torch/csrc/jit/passes/graph_fuser.cpp +++ b/torch/csrc/jit/passes/graph_fuser.cpp @@ -1,7 +1,7 @@ #include #include -#include +#include #include #include #include @@ -125,7 +125,7 @@ RegisterOperators reg_bn_unsqueeze({Operator( const int64_t ndim = pop(stack).toInt(); auto self = pop(stack).toTensor(); c10::SmallVector sizes(ndim, 1); - JIT_ASSERT(self.dim() == 1); + AT_ASSERT(self.dim() == 1); sizes.at(1) = self.size(0); push(stack, self.reshape(sizes)); return 0; @@ -157,7 +157,7 @@ bool isFusableBatchNorm(Node* batch_norm) { } Value* broadcastSizes(at::ArrayRef sizes) { - JIT_ASSERT(!sizes.empty()); + AT_ASSERT(!sizes.empty()); Graph* graph = sizes[0]->owningGraph(); Node* broadcast_n = graph->insertNode(graph->create(prim::BroadcastSizes, sizes)); @@ -225,7 +225,7 @@ struct GraphFuser { } Graph& getSubgraph(Node* n) { - JIT_ASSERT(n->kind() == prim::FusionGroup); + AT_ASSERT(n->kind() == prim::FusionGroup); return *n->g(attr::Subgraph); } @@ -254,7 +254,7 @@ struct GraphFuser { }, &bn_graph); - JIT_ASSERT(isFusableBatchNorm(batch_norm)); + AT_ASSERT(isFusableBatchNorm(batch_norm)); WithInsertPoint insert_guard{batch_norm}; Value* input = batch_norm->namedInput(attr::input); Value* input_dim = graph_->insert(aten::dim, {input}); @@ -344,13 +344,13 @@ struct GraphFuser { // DOES NOT WORK if n is a consumer of an output of the fusion group // returns the node _inside_ the group that represents the node Node* mergeNodeIntoGroup(Node* group, Node* n) { - JIT_ASSERT(n->kind() != prim::FusionGroup); + AT_ASSERT(n->kind() != prim::FusionGroup); auto& subgraph = getSubgraph(group); // map from nodes in the surrounding graph to parameters in the fusion // group's subgraph that correspond to them std::unordered_map inputs_map; size_t i = 0; - JIT_ASSERT(group->inputs().size() == subgraph.inputs().size()); + AT_ASSERT(group->inputs().size() == subgraph.inputs().size()); for (auto input : group->inputs()) { inputs_map[input] = subgraph.inputs()[i++]; } @@ -369,7 +369,7 @@ struct GraphFuser { // so we generally don't allow fusing tensor-scalar operations unless // the scalar is constant. In those cases we inline the constants // directly in the body of the fused group. 
- JIT_ASSERT(input->node()->kind() == prim::Constant); + AT_ASSERT(input->node()->kind() == prim::Constant); Node* in_const = subgraph.createClone(input->node(), [](Value*) -> Value* { throw std::runtime_error("unexpected input"); @@ -461,7 +461,7 @@ struct GraphFuser { mergeFusionGroups(group, producer->node()); return group; } - JIT_ASSERT(producer->node()->outputs().size() == 1); + AT_ASSERT(producer->node()->outputs().size() == 1); Node* merged = mergeNodeIntoGroup(group, producer->node()); // remaining uses of this producer can occur because we allow // fusion in cases where uses remain after the consumer @@ -502,7 +502,7 @@ struct GraphFuser { } c10::optional findFusedChunk(Node* group, Value* input) { - JIT_ASSERT(group->kind() == prim::FusionGroup); + AT_ASSERT(group->kind() == prim::FusionGroup); auto it = std::find(group->inputs().begin(), group->inputs().end(), input); if (it == group->inputs().end()) { return c10::nullopt; @@ -513,7 +513,7 @@ struct GraphFuser { // If subgraph_input is an input to prim::ConstantChunk, it will have 1 use auto* node = subgraph_input->uses().at(0).user; if (node->kind() == prim::ConstantChunk) { - JIT_ASSERT(subgraph_input->uses().size() == 1); + AT_ASSERT(subgraph_input->uses().size() == 1); return node; } return c10::nullopt; @@ -551,8 +551,8 @@ struct GraphFuser { // input. graph_node_list::iterator fuseChunk(Node* consumer, Value* producer) { auto* chunk = producer->node(); - JIT_ASSERT(consumer->kind() == prim::FusionGroup); - JIT_ASSERT(chunk->kind() == prim::ConstantChunk); + AT_ASSERT(consumer->kind() == prim::FusionGroup); + AT_ASSERT(chunk->kind() == prim::ConstantChunk); // if producer's input is already an input to a prim::ConstantChunk node, // we cannot add a new prim::ConstantChunk node because of invariant (2). @@ -606,14 +606,14 @@ struct GraphFuser { auto new_tensors_it = new_tensors.begin(); for (size_t i = 0; i < node->inputs().size(); ++i) { if (node->inputs()[i]->type()->isSubtypeOf(DynamicType::get())) { - JIT_ASSERT(new_tensors_it != new_tensors.end()); + AT_ASSERT(new_tensors_it != new_tensors.end()); node->replaceInput(i, *(new_tensors_it++)); } } } Node* promoteChunkToBroadcastingChunk(Node* chunk) { - JIT_ASSERT(chunk->kind() == prim::ConstantChunk); + AT_ASSERT(chunk->kind() == prim::ConstantChunk); size_t nchunks = chunk->i(attr::chunks); Node* bchunk = @@ -728,7 +728,7 @@ struct GraphFuser { } // multiple return operators Node* producer_for_chunk_node = producer_for_chunk->node(); - JIT_ASSERT(producer_for_chunk_node->outputs().size() == 1); + AT_ASSERT(producer_for_chunk_node->outputs().size() == 1); // Convert chunk to bchunk, if it isn't one already. The bchunk represents a // broadcast and one or more chunk operations. @@ -796,7 +796,7 @@ struct GraphFuser { auto chunked_inputs_it = chunked_inputs.begin(); for (Value* original_input : original_inputs) { if (original_input->type()->isSubtypeOf(DynamicType::get())) { - JIT_ASSERT(chunked_inputs_it != chunked_inputs.end()); + AT_ASSERT(chunked_inputs_it != chunked_inputs.end()); chunked_op->addInput( chunked_inputs_it->at(chunk_sel->offset() % nchunks)); ++chunked_inputs_it; @@ -824,7 +824,7 @@ struct GraphFuser { auto tensor_sizes = fmap(tensor_inputs, [](Value* v) { return v->owningGraph()->insert(aten::size, {v}); }); - JIT_ASSERT(!tensor_sizes.empty()); + AT_ASSERT(!tensor_sizes.empty()); Value* output_size = tensor_sizes.size() == 1 ? 
tensor_sizes[0] : broadcastSizes(tensor_sizes); @@ -915,7 +915,7 @@ struct GraphFuser { auto inputs = fusion_group->inputs(); auto sinputs = subgraph->inputs(); - JIT_ASSERT(inputs.size() == sinputs.size()); + AT_ASSERT(inputs.size() == sinputs.size()); for (size_t i = 0; i < inputs.size(); ++i) { shape_of[sinputs[i]] = graph->insert(aten::size, {inputs[i]}); } @@ -926,7 +926,7 @@ struct GraphFuser { // beginning of the kernel. auto outputs = fusion_group->outputs(); auto soutputs = subgraph->outputs(); - JIT_ASSERT(outputs.size() == soutputs.size()); + AT_ASSERT(outputs.size() == soutputs.size()); for (size_t i = 0; i < outputs.size(); ++i) { if (usedOnlyInSize(outputs[i])) continue; @@ -967,7 +967,7 @@ struct GraphFuser { }); auto shapes = fmap(tensor_inputs, [&](Value* v) { return shape_of.at(v); }); - JIT_ASSERT(!shapes.empty()); + AT_ASSERT(!shapes.empty()); shape_of.emplace( n->output(), shapes.size() == 1 ? shapes[0] : broadcastSizes(shapes)); } @@ -991,7 +991,7 @@ struct GraphFuser { if (usedOnlyInSize(output) && shape_of.count(soutput) > 0) { auto uses = output->uses(); for (Use u : uses) { - JIT_ASSERT(u.user->matches("aten::size(Tensor self) -> int[]")); + AT_ASSERT(u.user->matches("aten::size(Tensor self) -> int[]")); u.user->output()->replaceAllUsesWith(shape_of.at(soutput)); u.user->destroy(); } @@ -1026,7 +1026,7 @@ struct GraphFuser { } Node* createFusedConcat(Node* node) { - JIT_ASSERT(node->kind() == aten::cat); + AT_ASSERT(node->kind() == aten::cat); Graph* graph = node->owningGraph(); Node* list_construct = node->namedInput(attr::tensors)->node(); @@ -1062,7 +1062,7 @@ struct GraphFuser { } any_fused = true; auto maybe_group = tryFuse(fused_cat, input); - JIT_ASSERT(maybe_group && maybe_group == fused_cat); + AT_ASSERT(maybe_group && maybe_group == fused_cat); // We could have destroyed multiple inputs when performing this fusion, // so we have to recompute the list and iterate over it again. 
sorted_inputs = sortReverseTopological(fused_cat->inputs()); diff --git a/torch/csrc/jit/passes/loop_unrolling.cpp b/torch/csrc/jit/passes/loop_unrolling.cpp index 216d08e..b97403d 100644 --- a/torch/csrc/jit/passes/loop_unrolling.cpp +++ b/torch/csrc/jit/passes/loop_unrolling.cpp @@ -1,7 +1,7 @@ #include -#include -#include +#include +#include #include #include @@ -108,7 +108,7 @@ void repeatBody(Block* body, int64_t times) { // Update loop-carried values // NB: note that we don't need to worry about the loop counter, because // we've replaced it with a loop-carried variable - JIT_ASSERT(body->inputs().size() == body->outputs().size()); + AT_ASSERT(body->inputs().size() == body->outputs().size()); for (size_t i = 1; i < body->inputs().size(); ++i) { value_map[body->inputs()[i]] = get_value(body->outputs()[i]); } diff --git a/torch/csrc/jit/passes/lower_tuples.cpp b/torch/csrc/jit/passes/lower_tuples.cpp index 9c1764c..16d91f5 100644 --- a/torch/csrc/jit/passes/lower_tuples.cpp +++ b/torch/csrc/jit/passes/lower_tuples.cpp @@ -1,4 +1,4 @@ -#include +#include #include #include #include @@ -83,10 +83,10 @@ static void VisitNode(Node* n, Node* insert_point) { for (size_t i = 0; i < n->inputs().size();) { auto input = n->inputs()[i]; if (TupleTypePtr tt = input->type()->cast()) { - JIT_ASSERTM( + AT_CHECK( white_list.count(n->kind()) > 0, "tuple appears in op that does not forward tuples"); - JIT_ASSERTM( + AT_CHECK( input->node()->kind() == prim::TupleConstruct, "tuple use not matched to tuple construct"); for (size_t j = 0; j < tt->elements().size(); ++j) { @@ -112,7 +112,7 @@ static void VisitNode(Node* n, Node* insert_point) { // tup = (t0, t1) // is placed at the current insertion point if (TupleTypePtr tt = output->type()->cast()) { - JIT_ASSERTM( + AT_CHECK( white_list.count(n->kind()) > 0, "tuple appears in op that does not forward tuples"); for (size_t j = 0; j < tt->elements().size(); j++) { @@ -150,7 +150,7 @@ static void LowerAllTuples(Block* block) { static void EnsureNoTuples(ArrayRef values) { for (Value* v : values) { - JIT_ASSERTM( + AT_CHECK( v->type()->kind() != TypeKind::TupleType, "Couldn't lower all tuples."); } } diff --git a/torch/csrc/jit/passes/onnx.cpp b/torch/csrc/jit/passes/onnx.cpp index 0f0f668..75f350a 100644 --- a/torch/csrc/jit/passes/onnx.cpp +++ b/torch/csrc/jit/passes/onnx.cpp @@ -1,6 +1,6 @@ #include #include -#include +#include #include #include #include @@ -63,8 +63,8 @@ void BlockToONNX( // Returns a node that n maps to in the new graph auto envFn = [&env](Value* n) -> Value* { auto it = env.find(n); - JIT_ASSERTM(it != env.end(), "Dangling node reference"); - JIT_ASSERTM(it->second, "Unused node was subsequently used"); + AT_CHECK(it != env.end(), "Dangling node reference"); + AT_CHECK(it->second, "Unused node was subsequently used"); return it->second; }; @@ -199,13 +199,13 @@ void BlockToONNX( for (auto arg_type : op->cconv) { py::object obj; if (arg_type == 'c') { - JIT_ASSERTM( + AT_CHECK( scalar_it != op->scalar_args.end(), "expected too many scalar args"); obj = py::reinterpret_borrow( py::handle((scalar_it++)->get())); } else if (arg_type == 'd') { - JIT_ASSERTM(node_it != inputs.end(), "expected too many inputs"); + AT_CHECK(node_it != inputs.end(), "expected too many inputs"); obj = py::cast(envFn(*node_it++)); } else { throw std::runtime_error("unexpected calling convention"); diff --git a/torch/csrc/jit/passes/onnx/fixup_onnx_loop.cpp b/torch/csrc/jit/passes/onnx/fixup_onnx_loop.cpp index bc6ba90..b6594a4 100644 --- 
a/torch/csrc/jit/passes/onnx/fixup_onnx_loop.cpp +++ b/torch/csrc/jit/passes/onnx/fixup_onnx_loop.cpp @@ -5,8 +5,8 @@ namespace jit { void FixupONNXLoops(Block* block) { for (auto* node : block->nodes()) { - if (node->kind() == torch::jit::onnx::Loop) { - JIT_ASSERT(node->blocks().size() == 1); + if (node->kind() == ::c10::onnx::Loop) { + AT_ASSERT(node->blocks().size() == 1); auto* sub_block = node->blocks()[0]; sub_block->insertInput(1, "cond"); } diff --git a/torch/csrc/jit/passes/onnx/peephole.cpp b/torch/csrc/jit/passes/onnx/peephole.cpp index c0e9527..eae1406 100644 --- a/torch/csrc/jit/passes/onnx/peephole.cpp +++ b/torch/csrc/jit/passes/onnx/peephole.cpp @@ -1,4 +1,4 @@ -#include +#include #include #include @@ -11,6 +11,10 @@ typedef SSIZE_T ssize_t; namespace torch { namespace jit { +namespace onnx { +using namespace ::c10::onnx; +} + bool isRNN(const Node* node) { auto k = node->kind(); return k == onnx::RNN || k == onnx::LSTM || k == onnx::GRU; @@ -35,11 +39,11 @@ bool isNopTranspose(const std::vector& perm) { std::vector composeTransposes( const std::vector& t1, const std::vector& t2) { - JIT_ASSERT(t1.size() == t2.size()); + AT_ASSERT(t1.size() == t2.size()); std::vector ret; ret.reserve(t1.size()); for (const auto& i : t2) { - JIT_ASSERT(i < int64_t(t1.size())); + AT_ASSERT(i < int64_t(t1.size())); ret.push_back(t1[i]); } return ret; @@ -97,7 +101,7 @@ void fuseBroadcast(Block* b) { auto& broadcast_positions = getBroadcastPositions(n); if (!broadcast_positions.empty()) { - JIT_ASSERT(!n->hasAttribute(attr::axis)); + AT_ASSERT(!n->hasAttribute(attr::axis)); } for (size_t position : broadcast_positions) { @@ -492,7 +496,7 @@ static void speculateOps(Block* block) { static void replaceInputWithList(Node* node, size_t i, ArrayRef to) { node->removeInput(i); for (auto* to_val : to) { - JIT_ASSERT(to_val->owningGraph() == node->owningGraph()); + AT_ASSERT(to_val->owningGraph() == node->owningGraph()); node->insertInput(i++, to_val); } } diff --git a/torch/csrc/jit/passes/python_print.cpp b/torch/csrc/jit/passes/python_print.cpp index 16ad0e7..7421fb0 100644 --- a/torch/csrc/jit/passes/python_print.cpp +++ b/torch/csrc/jit/passes/python_print.cpp @@ -1,4 +1,4 @@ -#include +#include #include #include #include @@ -117,7 +117,7 @@ struct QualifiedName : c10::intrusive_ptr_target { } out << name_; } else { - JIT_ASSERT(prefix_); + AT_ASSERT(prefix_); out << "getattr("; prefix_->emit(out); out << ", "; @@ -281,7 +281,7 @@ struct PythonPrintPass { // block_point's output. Node* scanValue(Node* block_point, Value* v) { Node* n = v->node(); - JIT_ASSERT(isConstantLike(n) || output_inline_.count(n) == 0); + AT_ASSERT(isConstantLike(n) || output_inline_.count(n) == 0); if (n == block_point && canInline(v)) { // the node must be at the expected point of the typical @@ -336,7 +336,7 @@ struct PythonPrintPass { return i; } } - JIT_ASSERT(t.is_variable()); + AT_ASSERT(t.is_variable()); tensor_table_.emplace_back(std::move(t)); return tensor_table_.size() - 1; } @@ -825,7 +825,7 @@ struct PythonPrintPass { } } else { // vararg functions like format can have extra arguments - JIT_ASSERT(schema.is_vararg()); + AT_ASSERT(schema.is_vararg()); } stmt << v; } @@ -913,7 +913,7 @@ struct PythonPrintPass { } // have we use all the provided defaults? 
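composeTransposes above flattens two consecutive transposes into a single permutation with ret[i] = t1[t2[i]]. A self-contained check of that identity, assuming the element type stripped from this copy of the patch is int64_t (the cast in the assert implies it):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Mirror of composeTransposes above, with template arguments restored.
    std::vector<int64_t> composeTransposes(
        const std::vector<int64_t>& t1,
        const std::vector<int64_t>& t2) {
      assert(t1.size() == t2.size());
      std::vector<int64_t> ret;
      ret.reserve(t1.size());
      for (const auto& i : t2) {
        assert(i < int64_t(t1.size()));
        ret.push_back(t1[i]);
      }
      return ret;
    }

    int main() {
      // Composing {2,0,1} with its inverse {1,2,0} yields the identity,
      // which isNopTranspose above would then let the pass drop entirely.
      assert((composeTransposes({2, 0, 1}, {1, 2, 0}) ==
              std::vector<int64_t>{0, 1, 2}));
    }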
- JIT_ASSERT(defaults_offset == defaults.end()); + AT_ASSERT(defaults_offset == defaults.end()); out << ") -> " << resultType(graph)->python_str() << ":\n"; { @@ -1060,8 +1060,8 @@ TORCH_API bool printerHasSpecialCaseFor(Symbol sym) { // to be correctly printed for export (a process that happens before // optimization passes run) const static std::unordered_set unneeded = { - onnx::Reshape, // only used in onnx - onnx::Shape, // only used in onnx + c10::onnx::Reshape, // only used in onnx + c10::onnx::Shape, // only used in onnx prim::AnyDefined, // temporarily inserted by autograd prim::AutogradAdd, // temporarily inserted by autograd prim::ConstantChunk, // optimization pass adds it diff --git a/torch/csrc/jit/passes/requires_grad_analysis.cpp b/torch/csrc/jit/passes/requires_grad_analysis.cpp index 36d00ca..881fc74 100644 --- a/torch/csrc/jit/passes/requires_grad_analysis.cpp +++ b/torch/csrc/jit/passes/requires_grad_analysis.cpp @@ -1,7 +1,7 @@ #include #include #include -#include +#include #include @@ -23,7 +23,7 @@ void setRequiresGrad(Value* value, bool req_value) { void setRequiresGrad( at::ArrayRef outputs, const std::vector& values) { - JIT_ASSERT(outputs.size() == values.size()); + AT_ASSERT(outputs.size() == values.size()); for (size_t i = 0; i < values.size(); ++i) { setRequiresGrad(outputs[i], values[i]); } @@ -34,7 +34,7 @@ void setRequiresGrad(Node* node, const std::vector& values) { } std::vector bitwiseOr(std::vector a, const std::vector& b) { - JIT_ASSERT(a.size() == b.size()); + AT_ASSERT(a.size() == b.size()); for (size_t i = 0; i < a.size(); ++i) { a[i] = a[i] || b[i]; } diff --git a/torch/csrc/jit/passes/shape_analysis.cpp b/torch/csrc/jit/passes/shape_analysis.cpp index e8b8425..5efdb38 100644 --- a/torch/csrc/jit/passes/shape_analysis.cpp +++ b/torch/csrc/jit/passes/shape_analysis.cpp @@ -1,7 +1,7 @@ #include #include -#include +#include #include #include #include @@ -21,6 +21,10 @@ namespace torch { namespace jit { +namespace prim { +using namespace ::c10::prim; +} + struct propagation_error : std::exception {}; #define SHAPE_ASSERT(cond) \ @@ -148,12 +152,12 @@ class ShapePropagator { ArrayRef lhs, ArrayRef rhs, ArrayRef outputs) { - JIT_ASSERT(lhs.size() == rhs.size() && rhs.size() == outputs.size()); + AT_ASSERT(lhs.size() == rhs.size() && rhs.size() == outputs.size()); bool changed = false; for (size_t i = 0; i < lhs.size(); ++i) { auto old_output_type = outputs[i]->type(); auto new_type = unifyTypes(lhs[i]->type(), rhs[i]->type()); - JIT_ASSERT(new_type); + AT_ASSERT(new_type); outputs[i]->setType(*new_type); if (*old_output_type != *outputs[i]->type()) changed = true; @@ -269,7 +273,7 @@ class ShapePropagator { // preceded by schema checking. 
op(stack); - JIT_ASSERT(stack.size() == node->outputs().size()); + AT_ASSERT(stack.size() == node->outputs().size()); for (size_t i = 0; i < stack.size(); ++i) { // some ops may have mixed tensor/primitive outputs // for primitives, we don't need to change the type because it is already @@ -408,7 +412,7 @@ class ShapePropagator { } case prim::TupleUnpack: { auto tuple_type = node->input()->type()->cast(); - JIT_ASSERT( + AT_ASSERT( tuple_type && tuple_type->elements().size() == node->outputs().size()); auto elems = tuple_type->elements(); @@ -470,7 +474,7 @@ class ShapePropagator { } static c10::optional determineListSize(Value* list) { - JIT_ASSERT(list->type()->cast()); + AT_ASSERT(list->type()->cast()); if (auto shape = constant_as>(list)) { return shape->size(); } @@ -500,7 +504,7 @@ class ShapePropagator { if (tensor_types.size() == 1) { return tensor_types[0]; } - JIT_ASSERT(!tensor_types.empty()); + AT_ASSERT(!tensor_types.empty()); auto any_type = tensor_types[arg_for_type]; auto max_dims = any_type->dim(); for (auto& type : tensor_types) { @@ -1108,9 +1112,9 @@ class ShapePropagator { return false; } else { auto outputs = node->outputs(); - JIT_ASSERT(types.size() == outputs.size()); + AT_ASSERT(types.size() == outputs.size()); for (size_t i = 0; i < types.size(); ++i) { - JIT_ASSERT(outputs[i]->type()->isSubtypeOf(DynamicType::get())); + AT_ASSERT(outputs[i]->type()->isSubtypeOf(DynamicType::get())); outputs[i]->setType(types[i]); } return true; @@ -1561,7 +1565,7 @@ class ShapePropagator { input_type->withSizesStrides(sizes, strides)); } return true; - } else if (node->kind() == onnx::Shape) { + } else if (node->kind() == ::c10::onnx::Shape) { SHAPE_ASSERT(node->inputs().size() == 1 && node->outputs().size() == 1); std::vector dim_vec = { (int64_t)tensor_types.at(0)->sizes().size()}; @@ -1569,7 +1573,7 @@ class ShapePropagator { node->output()->setType( CompleteTensorType::create(at::kLong, at::kCPU, dims)); return true; - } else if (node->kind() == onnx::Reshape) { + } else if (node->kind() == ::c10::onnx::Reshape) { setUnshapedType(node); return true; } diff --git a/torch/csrc/jit/passes/specialize_undef.cpp b/torch/csrc/jit/passes/specialize_undef.cpp index 799a304..a73650d 100644 --- a/torch/csrc/jit/passes/specialize_undef.cpp +++ b/torch/csrc/jit/passes/specialize_undef.cpp @@ -54,7 +54,7 @@ void specializeUndef(Graph& g) { // where we do not know if a value is defined since at the top level // a gradient graph is composed of Linear nodes and AutogradAdds // and LinearNodes only appear in these graphs - JIT_ASSERT(state[input] != State::Unknown); + AT_ASSERT(state[input] != State::Unknown); } // hoist the nodes in the GradOf body to be before the linear block for (auto it = body->nodes().begin(); it != body->nodes().end();) { diff --git a/torch/csrc/jit/passes/utils/check_alias_annotation.cpp b/torch/csrc/jit/passes/utils/check_alias_annotation.cpp index 93e18df..de3e30e 100644 --- a/torch/csrc/jit/passes/utils/check_alias_annotation.cpp +++ b/torch/csrc/jit/passes/utils/check_alias_annotation.cpp @@ -88,7 +88,7 @@ void checkInputPreconditions(const Stack& inputs) { } const auto& lhs = inputs.at(i); const auto& rhs = inputs.at(j); - JIT_ASSERT(!lhs.isAliasOf(rhs)); + AT_ASSERT(!lhs.isAliasOf(rhs)); } } } @@ -103,8 +103,8 @@ void checkAliases( if (output.iValue.isAliasOf(input.iValue)) { const auto inputSet = input.aliasInfo; const auto outputSet = output.aliasInfo; - JIT_ASSERT(inputSet && outputSet); - JIT_ASSERT(inputSet->isSubsetOf(*outputSet)); + AT_ASSERT(inputSet && 
outputSet); + AT_ASSERT(inputSet->isSubsetOf(*outputSet)); } } } @@ -115,12 +115,12 @@ void checkAliases( void checkWrites( const std::vector& inputs, const std::vector& deepCopiedInputs) { - JIT_ASSERT(inputs.size() == deepCopiedInputs.size()); + AT_ASSERT(inputs.size() == deepCopiedInputs.size()); for (size_t i = 0; i < inputs.size(); i++) { const auto& input = inputs[i]; const auto& deepCopiedInput = deepCopiedInputs[i]; if (!input.aliasInfo || !input.aliasInfo->isWrite()) { - JIT_ASSERT(deepEquals(input.iValue, deepCopiedInput)); + AT_ASSERT(deepEquals(input.iValue, deepCopiedInput)); } } } @@ -134,7 +134,7 @@ const Node* findNodeForOp( return node; } } - JIT_ASSERT(false); + AT_ASSERT(false); } // Handle a few special cases where we need to propagate constants @@ -205,7 +205,7 @@ void checkAliasAnnotation( if (inputValue) { push(stack, *inputValue); } else { - JIT_ASSERT(input->type()->kind() == TypeKind::OptionalType); + AT_ASSERT(input->type()->kind() == TypeKind::OptionalType); push(stack, IValue()); } } diff --git a/torch/csrc/jit/passes/utils/subgraph_utils.cpp b/torch/csrc/jit/passes/utils/subgraph_utils.cpp index 77f3dbf..5e21f02 100644 --- a/torch/csrc/jit/passes/utils/subgraph_utils.cpp +++ b/torch/csrc/jit/passes/utils/subgraph_utils.cpp @@ -33,12 +33,12 @@ std::shared_ptr getSubgraph(Node* n) { } void unmergeSubgraph(Node* subgraphNode) { - JIT_ASSERT(subgraphNode->kind() == prim::DifferentiableGraph); + AT_ASSERT(subgraphNode->kind() == prim::DifferentiableGraph); // Inline the graph, replace uses of node outputs and destroy the node const auto subgraphOutputs = inlineGraph( getSubgraph(subgraphNode), subgraphNode->inputs(), subgraphNode); - JIT_ASSERT(subgraphOutputs.size() >= subgraphNode->outputs().size()); + AT_ASSERT(subgraphOutputs.size() >= subgraphNode->outputs().size()); for (size_t i = 0; i < subgraphNode->outputs().size(); ++i) { subgraphNode->outputs()[i]->replaceAllUsesWith(subgraphOutputs[i]); } @@ -46,7 +46,7 @@ void unmergeSubgraph(Node* subgraphNode) { } void mergeNodeIntoSubgraph(Node* toMerge, Node* subgraphNode) { - JIT_ASSERT(hasSubgraph(subgraphNode)); + AT_ASSERT(hasSubgraph(subgraphNode)); if (hasSubgraph(toMerge)) { return mergeSubgraph(subgraphNode, toMerge); } @@ -56,7 +56,7 @@ void mergeNodeIntoSubgraph(Node* toMerge, Node* subgraphNode) { // Map from values in the surrounding graph to inputs in the subgraph std::unordered_map inputsMap; - JIT_ASSERT(subgraphNode->inputs().size() == subgraph->inputs().size()); + AT_ASSERT(subgraphNode->inputs().size() == subgraph->inputs().size()); size_t idx = 0; for (auto input : subgraphNode->inputs()) { inputsMap[input] = subgraph->inputs()[idx]; @@ -139,7 +139,7 @@ std::vector inlineGraph( // Initialize a map of inner graph values to outer graph values std::unordered_map innerToOuter; const auto innerInputs = subgraph->inputs(); - JIT_ASSERT(outerInputs.size() == innerInputs.size()); + AT_ASSERT(outerInputs.size() == innerInputs.size()); for (size_t i = 0; i < innerInputs.size(); ++i) { innerToOuter[innerInputs[i]] = outerInputs[i]; } diff --git a/torch/csrc/jit/pybind.h b/torch/csrc/jit/pybind.h index 1e03fb8..e1b7c3b 100644 --- a/torch/csrc/jit/pybind.h +++ b/torch/csrc/jit/pybind.h @@ -5,8 +5,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/torch/csrc/jit/pybind_utils.h b/torch/csrc/jit/pybind_utils.h index 9e8f9af..6045340 100644 --- a/torch/csrc/jit/pybind_utils.h +++ b/torch/csrc/jit/pybind_utils.h @@ -1,16 +1,16 @@ #pragma once #include 
-#include -#include +#include #include #include #include -#include +#include #include #include #include +#include #include #include @@ -31,6 +31,9 @@ namespace torch { namespace jit { namespace detail { +using ::c10::Argument; +using ::c10::FunctionSchema; + // error reporting: when reporting user-caused errors, these functions should // not use AT_ERROR macros, since these macros add stack trace information // that is confusing to display to the end user since it always reports diff --git a/torch/csrc/jit/python_interpreter.cpp b/torch/csrc/jit/python_interpreter.cpp index f2ae189..5fdff64 100644 --- a/torch/csrc/jit/python_interpreter.cpp +++ b/torch/csrc/jit/python_interpreter.cpp @@ -30,8 +30,8 @@ namespace { Operation createPythonOperation(const Node* op_) { AutoGIL gil; const PythonOp* op = static_cast(op_); - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) const py::function func = py::reinterpret_borrow( + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) py::handle(const_cast(op)->pyobj.get())); size_t num_inputs = 0; @@ -40,7 +40,7 @@ Operation createPythonOperation(const Node* op_) { num_inputs++; } - JIT_ASSERT(op->outputs().size() == 1); + AT_ASSERT(op->outputs().size() == 1); return [=](Stack& stack) { AutoGIL gil; @@ -50,8 +50,8 @@ Operation createPythonOperation(const Node* op_) { size_t next_tensor = 0; for (auto arg_type : op->cconv) { if (arg_type == 'c') { - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) py_inputs[i] = py::reinterpret_borrow( + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) const_cast(op)->scalar_args[next_scalar++].get()); } else if (arg_type == 'd') { py_inputs[i] = diff --git a/torch/csrc/jit/python_tracer.cpp b/torch/csrc/jit/python_tracer.cpp index aa56f4a..45365b9 100644 --- a/torch/csrc/jit/python_tracer.cpp +++ b/torch/csrc/jit/python_tracer.cpp @@ -163,7 +163,7 @@ void initPythonTracerBindings(PyObject* module) { }); m.def("_tracer_set_get_unique_name_fn", [](py::function func) { const auto& tracing_state = getTracingState(); - JIT_ASSERT(tracing_state); + AT_ASSERT(tracing_state); tracing_state->lookup_var_name_fn = [func](const Variable& var) -> std::string { AutoGIL ag; @@ -172,7 +172,7 @@ void initPythonTracerBindings(PyObject* module) { }); m.def("_tracer_set_force_outplace", [](bool force_outplace) { const auto& tracing_state = getTracingState(); - JIT_ASSERT(tracing_state); + AT_ASSERT(tracing_state); tracing_state->force_outplace = force_outplace; }); } diff --git a/torch/csrc/jit/register_prim_ops.cpp b/torch/csrc/jit/register_prim_ops.cpp index 1831a70..573c17d 100644 --- a/torch/csrc/jit/register_prim_ops.cpp +++ b/torch/csrc/jit/register_prim_ops.cpp @@ -468,7 +468,7 @@ RegisterOperators reg({ (shape[dim] + split_size - 1) / split_size, 1); last_shape[dim] = split_size - (split_size * num_splits - shape[dim]); - JIT_ASSERT(last_shape[dim] >= 0); + AT_ASSERT(last_shape[dim] >= 0); } push(stack, std::move(regular_shape)); push(stack, std::move(last_shape)); @@ -518,20 +518,20 @@ RegisterOperators reg({ }; }), Operator( - onnx::Reshape, + c10::onnx::Reshape, [](const Node* node) { return [=](Stack& stack) { at::Tensor input, shape; pop(stack, input, shape); shape = shape.contiguous(); - JIT_ASSERT(shape.ndimension() == 1); + AT_ASSERT(shape.ndimension() == 1); at::IntList shape_list(shape.data(), shape.size(0)); push(stack, input.reshape(shape_list)); return 0; }; }), Operator( - onnx::Shape, + c10::onnx::Shape, [](const Node* node) { return [=](Stack& stack) { auto t = pop(stack).toTensor(); @@ 
-671,7 +671,7 @@ RegisterOperators reg({ int64_t num_results = result.size(); if (num_results != chunks) { if (num_results > chunks) { - JIT_ASSERTM( + AT_CHECK( num_results == chunks, "Expected chunk to return ", chunks, @@ -808,8 +808,8 @@ RegisterOperators reg({ [](const Node* node) { Code code(node->g(attr::Subgraph)); int n_inputs = node->inputs().size(); - JIT_ASSERT(node->blocks().size() == 0); - JIT_ASSERT(node->hasAttribute(attr::Subgraph)); + AT_ASSERT(node->blocks().size() == 0); + AT_ASSERT(node->hasAttribute(attr::Subgraph)); return [=](Stack& stack) { // Move inputs to a separate stack InterpreterState forked_interprester(code); diff --git a/torch/csrc/jit/register_special_ops.cpp b/torch/csrc/jit/register_special_ops.cpp index c5ca2b3..4c5d33a 100644 --- a/torch/csrc/jit/register_special_ops.cpp +++ b/torch/csrc/jit/register_special_ops.cpp @@ -125,7 +125,7 @@ void recursiveStore(char* data, const std::vector& sizes, const c10::Ar data += strides[dim] * elementSize; } } else { - JIT_ASSERT(obj.isIntList() || obj.isDoubleList() || obj.isBoolList()); + AT_ASSERT(obj.isIntList() || obj.isDoubleList() || obj.isBoolList()); if (obj.isIntList()) { storeLastDimension(data, sizes, strides, dim, elementSize, obj.toIntListRef()); } else if (obj.isDoubleList()){ @@ -168,7 +168,7 @@ RegisterOperators reg({ auto defaults = peek(stack, 1, 2).toIntListRef(); drop(stack, 2); - JIT_ASSERT(defaults.size() > list.size()); + AT_ASSERT(defaults.size() > list.size()); // TODO: allow list of optionals to be filled in with defaults // i.e. list_with_default([1, 2, None], [1, 2, 3]) -> [1, 2, 3] diff --git a/torch/csrc/jit/scope.cpp b/torch/csrc/jit/scope.cpp index 38f98ac..3690a7a 100644 --- a/torch/csrc/jit/scope.cpp +++ b/torch/csrc/jit/scope.cpp @@ -1,6 +1,6 @@ #include -#include +#include #include #include diff --git a/torch/csrc/jit/scope.h b/torch/csrc/jit/scope.h index 263c080..b01228d 100644 --- a/torch/csrc/jit/scope.h +++ b/torch/csrc/jit/scope.h @@ -1,8 +1,8 @@ #pragma once #include #include -#include -#include +#include +#include #include @@ -18,6 +18,7 @@ namespace jit { // will always be valid as long as Graph is alive. struct Scope; using ScopePtr = c10::intrusive_ptr; +using c10::Symbol; struct TORCH_API Scope : public c10::intrusive_ptr_target { private: diff --git a/torch/csrc/jit/script/builtin_functions.cpp b/torch/csrc/jit/script/builtin_functions.cpp index 7a5763d..02e2ad1 100644 --- a/torch/csrc/jit/script/builtin_functions.cpp +++ b/torch/csrc/jit/script/builtin_functions.cpp @@ -53,7 +53,7 @@ struct BuiltinFunctionRegistry { loadBuiltinFunctions(); state = INITIALIZED; } - JIT_ASSERT(state == INITIALIZED); + AT_ASSERT(state == INITIALIZED); auto it = builtins_by_name.find(name); if (it == builtins_by_name.end()) return empty; diff --git a/torch/csrc/jit/script/compiler.cpp b/torch/csrc/jit/script/compiler.cpp index 681bee7..fc3f88c 100644 --- a/torch/csrc/jit/script/compiler.cpp +++ b/torch/csrc/jit/script/compiler.cpp @@ -1,5 +1,5 @@ #include -#include +#include #include #include #include @@ -435,8 +435,8 @@ struct Environment { // inputs: loop_counter, lcd0, lcd1, ... // outputs: loop_condition, lcd0, lcd1, ... // captured_inputs: lcd0, lcd1, ... 
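The block layout described in the comment above, written out as IR so the two asserts that follow are easier to read (values illustrative, loop body hypothetical):

    %y1 = prim::Loop(%max_trip_count, %initial_cond, %y0)
      block0(%loop_counter, %lcd0):
        %lcd0.next = aten::neg(%lcd0)                      # hypothetical body
        %continue  = aten::lt(%loop_counter, %max_trip_count)
        -> (%continue, %lcd0.next)

Block inputs and outputs line up except for the leading slot, the trip counter on the way in and the continuation condition on the way out, hence inputs().size() == outputs().size() with one loop-carried value in each remaining position.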
- JIT_ASSERT(b->inputs().size() == b->outputs().size()); - JIT_ASSERT(b->inputs().size() == captured_inputs.size() + 1); + AT_ASSERT(b->inputs().size() == b->outputs().size()); + AT_ASSERT(b->inputs().size() == captured_inputs.size() + 1); for (size_t i = b->inputs().size() - 1; i > 0; i--) { // nothing changed along this loop if (b->inputs()[i] == b->outputs()[i]) { @@ -520,7 +520,7 @@ struct to_ir { graph(method.graph()), resolver(std::move(resolver_)), environment_stack(nullptr) { - JIT_ASSERT(resolver); + AT_ASSERT(resolver); pushFrame(graph->block(), /*starts_def=*/true); // Type annotations exclude explicitly typing the "self" parameter, so in @@ -724,7 +724,7 @@ struct to_ir { << expected_annotation_size << ")!"; } if (self) { - JIT_ASSERT(it != end); + AT_ASSERT(it != end); environment_stack->setSugaredVar(def.range(), (*it).ident().name(), self); ++it; } @@ -750,7 +750,7 @@ struct to_ir { const FunctionSchema& schema, Block* block) { // rewrites ensure there is always a return statement in program - JIT_ASSERT(def_stack_.back().merged_return_type_); + AT_ASSERT(def_stack_.back().merged_return_type_); // outputs Value* result = environment_stack->getVar("$return", range); block->registerOutput(result); @@ -865,7 +865,7 @@ struct to_ir { << result->type()->python_str(); } } - JIT_ASSERT(result_type); + AT_ASSERT(result_type); def_stack_.back().merged_return_type_ = result_type; environment_stack->setVar(stmt.range(), "$return", result); } @@ -1608,7 +1608,7 @@ struct to_ir { // list.set_item(get_item(idx).add_(value)) // similar to how Python handles things. const auto listType = sliceable->type()->cast(); - JIT_ASSERT(listType != nullptr); + AT_ASSERT(listType != nullptr); bool isTensorList = listType->getElementType()->isSubtypeOf(DynamicType::get()); @@ -2090,7 +2090,7 @@ struct to_ir { Stack stack; stack.push_back(*maybe_constant_input); op(stack); - JIT_ASSERT(stack.size() == 1); + AT_ASSERT(stack.size() == 1); return graph->insertConstant(stack[0], tree->range()); } @@ -2290,10 +2290,10 @@ struct to_ir { // XXX: If list slicing becomes more complicated or stops using // aten::slice, we should separate it from this function. 
if (dim) { - JIT_ASSERT(input->type()->isSubtypeOf(DynamicType::get())); + AT_ASSERT(input->type()->isSubtypeOf(DynamicType::get())); args.emplace_back(loc, "dim", graph->insertConstant(dim.value(), loc)); } else { - JIT_ASSERT(!input->type()->isSubtypeOf(DynamicType::get())); + AT_ASSERT(!input->type()->isSubtypeOf(DynamicType::get())); } args.emplace_back(loc, "begin", emitExpr(Expr(slice.startOr(0)))); @@ -2420,8 +2420,8 @@ struct to_ir { const SourceRange& loc, Value* sliceable, const List& subscript_exprs) { - JIT_ASSERT(subscript_exprs.size() == 1); - JIT_ASSERT(subscript_exprs[0].kind() == TK_SLICE_EXPR); + AT_ASSERT(subscript_exprs.size() == 1); + AT_ASSERT(subscript_exprs[0].kind() == TK_SLICE_EXPR); auto slice_exp = SliceExpr(subscript_exprs[0]); c10::optional maybe_dim; if (sliceable->type()->isSubtypeOf(DynamicType::get())) { @@ -2516,7 +2516,7 @@ struct to_ir { const SourceRange& loc, Value* gatherable, const List& subscript_exprs) { - JIT_ASSERT(subscript_exprs.size() == 1); + AT_ASSERT(subscript_exprs.size() == 1); if (gatherable->type()->kind() == TypeKind::ListType) { // if it's a list, emit a regular index selection op @@ -2540,14 +2540,14 @@ void defineMethodsInModule( const std::vector& definitions, const std::vector& resolvers, const SugaredValuePtr& self) { - JIT_ASSERT(definitions.size() == resolvers.size()); + AT_ASSERT(definitions.size() == resolvers.size()); auto resolver_it = resolvers.begin(); std::vector methods; std::unordered_map function_table; for (const Def& def : definitions) { const std::string& name = def.name().name(); auto resolver = *resolver_it++; - JIT_ASSERT(resolver); + AT_ASSERT(resolver); if (!self) { // if self is defined, then these are methods and do not go into the // global namespace otherwise, they get defined together so we add them to @@ -2564,7 +2564,7 @@ void defineMethodsInModule( }; } auto creator = [def, resolver, self](Method& method) { - JIT_ASSERT(resolver); + AT_ASSERT(resolver); to_ir(def, resolver, self, method); }; Method& method = m->create_method(name, creator); diff --git a/torch/csrc/jit/script/init.cpp b/torch/csrc/jit/script/init.cpp index 856328a..577c61b 100644 --- a/torch/csrc/jit/script/init.cpp +++ b/torch/csrc/jit/script/init.cpp @@ -9,7 +9,6 @@ #include #include -#include #include #include #include @@ -21,6 +20,7 @@ #include #include +#include #include #include @@ -35,6 +35,9 @@ namespace torch { namespace jit { namespace script { +using ::c10::Argument; +using ::c10::FunctionSchema; + using ResolutionCallback = std::function; using FunctionDefaults = std::unordered_map; @@ -139,7 +142,7 @@ struct VISIBILITY_HIDDEN PythonValue : public SugaredValue { for (auto& i : matched_schema->inputs) new_node->addInput(i); - JIT_ASSERT(matched_schema->return_types.size() == 1); + AT_ASSERT(matched_schema->return_types.size() == 1); Value* output = new_node->addOutput()->setType(matched_schema->return_types.at(0)); return std::make_shared(output); diff --git a/torch/csrc/jit/script/lexer.h b/torch/csrc/jit/script/lexer.h index 7890d7d..a19731f 100644 --- a/torch/csrc/jit/script/lexer.h +++ b/torch/csrc/jit/script/lexer.h @@ -1,5 +1,5 @@ #pragma once -#include +#include #include #include #include @@ -117,7 +117,7 @@ struct TokenTrie { TokenTrie() : kind(0) {} void insert(const char* str, int tok) { if (*str == '\0') { - JIT_ASSERT(kind == 0); + AT_ASSERT(kind == 0); kind = tok; return; } @@ -489,7 +489,7 @@ struct Lexer { int kind; size_t start; size_t length; - JIT_ASSERT(file); + AT_ASSERT(file); if (!shared.match( *file, 
pos, diff --git a/torch/csrc/jit/script/module.cpp b/torch/csrc/jit/script/module.cpp index 3aa1418..1b3560b 100644 --- a/torch/csrc/jit/script/module.cpp +++ b/torch/csrc/jit/script/module.cpp @@ -1,4 +1,4 @@ -#include +#include #include #include #include @@ -64,7 +64,7 @@ Value* Method::emit_call_to( Method& callee, ArrayRef args, ArrayRef kwargs) { - JIT_ASSERT(!executor); + AT_ASSERT(!executor); std::stringstream failure_messages; if (auto result = try_emit_call_to( *graph(), diff --git a/torch/csrc/jit/script/module.h b/torch/csrc/jit/script/module.h index 8712404..a5bc773 100644 --- a/torch/csrc/jit/script/module.h +++ b/torch/csrc/jit/script/module.h @@ -1,8 +1,7 @@ #pragma once #include #include -#include -#include +#include #include #include #include @@ -13,6 +12,7 @@ #include #include +#include #include #include @@ -32,6 +32,9 @@ namespace torch { namespace jit { namespace script { +using ::c10::Argument; +using ::c10::FunctionSchema; + // A method in a module, e.g. f in: // // class M(ScriptModule): @@ -57,7 +60,7 @@ struct Method { optimize(optimize), member_inputs(std::move(initial_members)), method_creator(std::move(method_creator)) { - JIT_ASSERT(graph_->inputs().size() >= member_inputs.size()); + AT_ASSERT(graph_->inputs().size() >= member_inputs.size()); int i = graph_->inputs().size() - member_inputs.size(); for (at::Tensor* member : member_inputs) { member_input_index[member] = i++; @@ -151,7 +154,7 @@ struct Method { ArgumentSpec(with_grad, fmap(inputs), inputs.size())); PropagateInputShapes(retval); } - JIT_ASSERT(retval->inputs().size() == inputs.size()); + AT_ASSERT(retval->inputs().size() == inputs.size()); for (size_t i = 0; i < retval->inputs().size(); ++i) { auto scalar_type = inputs[i].type().scalarType(); auto sizes = inputs[i].sizes(); @@ -162,10 +165,10 @@ struct Method { at::ArrayRef output_values = retval->outputs(); // patch this to still work if we are returning a tuple of multiple values if (output_values.at(0)->type()->kind() == TupleType::Kind) { - JIT_ASSERT(output_values.at(0)->node()->kind() == prim::TupleConstruct); + AT_ASSERT(output_values.at(0)->node()->kind() == prim::TupleConstruct); output_values = output_values.at(0)->node()->inputs(); } - JIT_ASSERT(output_values.size() == outputs.size()); + AT_ASSERT(output_values.size() == outputs.size()); for (size_t i = 0; i < retval->outputs().size(); ++i) { auto scalar_type = outputs[i].type().scalarType(); auto sizes = outputs[i].sizes(); @@ -193,7 +196,7 @@ struct Method { } std::string pretty_print_schema() const { - JIT_ASSERT(schema); + AT_ASSERT(schema); std::stringstream ss; ss << *schema; return ss.str(); @@ -405,7 +408,7 @@ struct Module { const std::string& name, std::shared_ptr graph, std::vector member_inputs) { - JIT_ASSERT(graph); + AT_ASSERT(graph); std::unique_ptr method(new Method( this, name, diff --git a/torch/csrc/jit/script/schema_matching.h b/torch/csrc/jit/script/schema_matching.h index 937bdc2..ea54d74 100644 --- a/torch/csrc/jit/script/schema_matching.h +++ b/torch/csrc/jit/script/schema_matching.h @@ -1,8 +1,9 @@ #pragma once #include -#include #include -#include +#include + +#include namespace torch { namespace jit { @@ -20,7 +21,7 @@ struct MatchedSchema { }; TORCH_API c10::optional tryMatchSchema( - const FunctionSchema& schema, + const ::c10::FunctionSchema& schema, const SourceRange& loc, Graph& graph, c10::optional self, diff --git a/torch/csrc/jit/script/type_parser.cpp b/torch/csrc/jit/script/type_parser.cpp index 61c137b..74f2baf 100644 --- 
+++ b/torch/csrc/jit/script/type_parser.cpp
@@ -115,7 +115,7 @@ c10::optional> parseBroadcastList(
        << "Broadcastable lists only supported for int or float";

   auto elem_ptr = ident_to_type_lut().find(value_name);
-  JIT_ASSERT(elem_ptr != ident_to_type_lut().end());
+  AT_ASSERT(elem_ptr != ident_to_type_lut().end());
   TypePtr list_ptr = ListType::create(elem_ptr->second);

   const char* len_c = len.c_str();
diff --git a/torch/csrc/jit/source_range.h b/torch/csrc/jit/source_range.h
index 7229b8d..7bd6870 100644
--- a/torch/csrc/jit/source_range.h
+++ b/torch/csrc/jit/source_range.h
@@ -1,5 +1,5 @@
 #pragma once
-#include
+#include
 #include
 #include
@@ -30,8 +30,8 @@ struct SourceRange : public SourceLocation {
       --begin_line;
     while (end_line < str.size() && str[end_line] != '\n')
       ++end_line;
-    JIT_ASSERT(begin_line == 0 || str[begin_line - 1] == '\n');
-    JIT_ASSERT(end_line == str.size() || str[end_line] == '\n');
+    AT_ASSERT(begin_line == 0 || str[begin_line - 1] == '\n');
+    AT_ASSERT(end_line == str.size() || str[end_line] == '\n');
     size_t begin_highlight = begin_line; // beginning of context, CONTEXT lines
                                          // before the highlight line
@@ -41,7 +41,7 @@
       if (i >= CONTEXT)
         break;
     }
-    JIT_ASSERT(begin_highlight == 0 || str[begin_highlight - 1] == '\n');
+    AT_ASSERT(begin_highlight == 0 || str[begin_highlight - 1] == '\n');
     size_t end_highlight =
         end_line; // end of context, CONTEXT lines after the highlight line
@@ -51,7 +51,7 @@
       if (i >= CONTEXT)
         break;
     }
-    JIT_ASSERT(end_highlight == str.size() || str[end_highlight] == '\n');
+    AT_ASSERT(end_highlight == str.size() || str[end_highlight] == '\n');
     out << str.substr(begin_highlight, end_line - begin_highlight) << "\n";
     out << std::string(start() - begin_line, ' ');
diff --git a/torch/csrc/jit/stack.h b/torch/csrc/jit/stack.h
index 8ff3d35..f29739a 100644
--- a/torch/csrc/jit/stack.h
+++ b/torch/csrc/jit/stack.h
@@ -1,11 +1,12 @@
 #pragma once
 #include
-#include
+#include

 namespace torch {
 namespace jit {

+using c10::IValue;
 using Stack = std::vector;
 using Operation = std::function;
@@ -65,7 +66,7 @@ static inline void pop(Stack& stack, Types&... args) {
 }

 template
 static inline void push(Stack& stack, Types&&... args) {
-  std::initializer_list{(stack.emplace_back(std::forward(args)), 0)...};
+  std::initializer_list{(stack.emplace_back(std::forward(args)), 0)...};
 }

 // The packer here is carefully written not to make any unnecessary
diff --git a/torch/csrc/jit/symbolic_variable.h b/torch/csrc/jit/symbolic_variable.h
index c7e9ef4..a397eca 100644
--- a/torch/csrc/jit/symbolic_variable.h
+++ b/torch/csrc/jit/symbolic_variable.h
@@ -199,7 +199,7 @@ struct SymbolicVariable {
     return create(aten::cat, {input_list, dim})[0];
   }
   static SymbolicVariable cat(ArrayRef inputs, int dim) {
-    JIT_ASSERT(inputs.size() > 0);
+    AT_ASSERT(inputs.size() > 0);
     return SymbolicVariable::cat(inputs, inputs[0].insertConstant(dim));
   }
   static SymbolicVariable stack(ArrayRef inputs, Value* dim) {
@@ -212,12 +212,12 @@ struct SymbolicVariable {
     return create(aten::stack, {input_list, dim})[0];
   }
   static SymbolicVariable stack(ArrayRef inputs, int dim) {
-    JIT_ASSERT(inputs.size() > 0);
+    AT_ASSERT(inputs.size() > 0);
     return SymbolicVariable::stack(inputs, inputs[0].insertConstant(dim));
   }
   static std::vector broadcast_tensors(
       ArrayRef inputs) {
-    JIT_ASSERT(inputs.size() > 0);
+    AT_ASSERT(inputs.size() > 0);
     Graph* g = inputs[0].value()->owningGraph();
     auto value_inputs =
         fmap(inputs, [](const SymbolicVariable& v) { return v.value(); });
diff --git a/torch/csrc/jit/tracer.cpp b/torch/csrc/jit/tracer.cpp
index 5941fd4..61e0865 100644
--- a/torch/csrc/jit/tracer.cpp
+++ b/torch/csrc/jit/tracer.cpp
@@ -3,7 +3,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
@@ -42,7 +42,7 @@ thread_local std::shared_ptr tracing_state;
 void setValueTrace(const IValue& v, Value* value) {
   if (v.isTensor()) {
     auto var = v.toTensor();
-    JIT_ASSERT(var.defined());
+    AT_ASSERT(var.defined());
     getTracingState()->value_map[var] = value;
   } else if (v.isTensorList()) {
     auto& outputs = v.toTensorList()->elements();
@@ -268,9 +268,9 @@ void ArgumentStash::stashIntListElem(
   if (!isTracing())
     return;
   auto& list_trace = stash.intlists.emplace(arg_name, size).first->second;
-  JIT_ASSERT(size == list_trace.size());
-  JIT_ASSERT(idx < list_trace.size());
-  JIT_ASSERT(list_trace[idx] == nullptr);
+  AT_ASSERT(size == list_trace.size());
+  AT_ASSERT(idx < list_trace.size());
+  AT_ASSERT(list_trace[idx] == nullptr);
   Value* ten = getValueTrace(var);
   auto& g = *ten->owningGraph();
diff --git a/torch/csrc/jit/tracer.h b/torch/csrc/jit/tracer.h
index 2fbe245..ff19819 100644
--- a/torch/csrc/jit/tracer.h
+++ b/torch/csrc/jit/tracer.h
@@ -4,7 +4,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -23,6 +23,21 @@ namespace torch {
 namespace jit {
 namespace tracer {

+using ::c10::ivalue::List;
+using ::c10::ivalue::Shared;
+
+using ::c10::IValue;
+using ::c10::ivalue::Future;
+using ::c10::ivalue::Tuple;
+
+using ::c10::ivalue::BoolList;
+using ::c10::ivalue::DoubleList;
+using ::c10::ivalue::GenericList;
+using ::c10::ivalue::IntList;
+using ::c10::ivalue::TensorList;
+
+using ::c10::ivalue::ConstantString;
+
 using torch::autograd::Variable;
 using variable_list = std::vector;
@@ -35,7 +50,7 @@ TORCH_API void setRecordSourceLocation(void (*v)(Node*));

 TORCH_API void setValueTrace(const IValue& v, Value* value);

 inline void delValueTrace(const Variable& var) {
-  JIT_ASSERT(var.defined());
+  AT_ASSERT(var.defined());
   getTracingState()->value_map.erase(var);
 }
diff --git a/torch/csrc/jit/tracing_state.h b/torch/csrc/jit/tracing_state.h
index ccb35a5..73a84bf 100644
--- a/torch/csrc/jit/tracing_state.h
+++ b/torch/csrc/jit/tracing_state.h
@@ -3,10 +3,10 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
-#include
+#include
 #include
 #include
@@ -93,7 +93,7 @@ struct ArgumentStash {
       const std::string& arg_name,
       size_t idx,
       const Variable& var,
-      const TypePtr& type = nullptr);
+      const c10::TypePtr& type = nullptr);

   static bool hasValue(const std::string& arg_name) {
     return stash.values.count(arg_name) > 0;
diff --git a/torch/csrc/jit/type.h b/torch/csrc/jit/type.h
deleted file mode 100644
index ec81a65..0000000
--- a/torch/csrc/jit/type.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#include
-
-namespace torch {
-namespace jit {
-
-#define C10_USING(T) using ::c10::T;
-C10_FORALL_TYPES(C10_USING)
-#undef C10_USING
-
-#define C10_USING(T) using ::c10::T##Ptr;
-C10_FORALL_TYPES(C10_USING)
-#undef C10_USING
-
-using ::c10::Type;
-using ::c10::TypeEnv;
-using ::c10::TypePtr;
-
-using ::c10::getTypePtr;
-using ::c10::MatchTypeReturn;
-using ::c10::TypeKind;
-
-} // namespace jit
-} // namespace torch