From f7e0bec32b1afaa2da9c43e0370d8954b100733a Mon Sep 17 00:00:00 2001
From: Sergei Barannikov/AI Tools Lab /SRR/Engineer/Samsung Electronics <s.barannikov@samsung.com>
Date: Mon, 18 Nov 2019 12:53:55 +0300
Subject: [PATCH] [neurun] Do not use Graph::release* methods in Linear (#8957)

Moving subgraphs, lower info and other entities from `Graph` to `Linear`
and then to the executors makes it difficult to tell who owns them.
It is enough to pass `Graph` to `Linear` by reference.

Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
---
 runtime/neurun/core/include/graph/Graph.h        |  1 -
 .../neurun/core/src/compiler/ExecutorFactory.cc  | 21 +++----
 runtime/neurun/core/src/compiler/Linear.cc       | 73 +++++++---------------
 runtime/neurun/core/src/compiler/Linear.h        | 29 +--------
 runtime/neurun/core/src/graph/Graph.cc           |  5 --
 5 files changed, 35 insertions(+), 94 deletions(-)

diff --git a/runtime/neurun/core/include/graph/Graph.h b/runtime/neurun/core/include/graph/Graph.h
index ee1203b..5a86555 100644
--- a/runtime/neurun/core/include/graph/Graph.h
+++ b/runtime/neurun/core/include/graph/Graph.h
@@ -191,7 +191,6 @@ public:
   }
   const model::Subgraphs *subgraphs() const { return _subgraphs.get(); }
   void setBackendResolver(std::unique_ptr<compiler::BackendResolver> &&br);
-  std::unique_ptr<compiler::BackendResolver> releaseBackendResolver();
 
 private:
   std::unique_ptr<compiler::BackendResolver> _backend_resolver;
diff --git a/runtime/neurun/core/src/compiler/ExecutorFactory.cc b/runtime/neurun/core/src/compiler/ExecutorFactory.cc
index 745b9dd..f0fd7bf 100644
--- a/runtime/neurun/core/src/compiler/ExecutorFactory.cc
+++ b/runtime/neurun/core/src/compiler/ExecutorFactory.cc
@@ -65,9 +65,7 @@ exec::IExecutor *ExecutorFactory::createLinearExecutor(graph::Graph &graph)
   // linearize
   assert(!graph.isBuildingPhase());
 
-  auto linear =
-      nnfw::cpp14::make_unique<Linear>(graph.shareModel(), graph.releaseSubgraphs(),
-                                       graph.releaseLowerInfo(), graph.releaseBackendResolver());
+  auto linear = nnfw::cpp14::make_unique<Linear>(graph);
 
   // Dump ops
   linear->accept(neurun::graph::dumper::Dumper{});
@@ -101,14 +99,14 @@ exec::IExecutor *ExecutorFactory::createLinearExecutor(graph::Graph &graph)
   // Fix shapes
   linear->iterate([&](const compiler::Linear::Element &element) {
     auto backend = element.lower_info->backend();
-    auto shape_fixer = linear->getBackendContext(backend)->shape_fixer;
-    shape_fixer->setLowerInfoMap(linear->getLowerInfo());
+    auto shape_fixer = graph.backend_resolver()->getBackendContext(backend)->shape_fixer;
+    shape_fixer->setLowerInfoMap(graph.getLowerInfo());
     shape_fixer->fix(*element.subgraph);
   });
 
   linear->planTensors();
 
-  auto tensor_builders = linear->backend_resolver()->tensor_builders();
+  auto tensor_builders = graph.backend_resolver()->tensor_builders();
 
   // Prepare tensors
   for (auto &tensor_builder : tensor_builders)
@@ -142,7 +140,7 @@ exec::IExecutor *ExecutorFactory::createLinearExecutor(graph::Graph &graph)
   // Generate kernels
   linear->iterate([&](const compiler::Linear::Element &element) {
     auto backend = element.lower_info->backend();
-    auto kernel_gen = linear->getBackendContext(backend)->kernel_gen;
+    auto kernel_gen = graph.backend_resolver()->getBackendContext(backend)->kernel_gen;
     kernel_gen->generate(*element.subgraph, execution_builder.get());
   });
 
@@ -156,7 +154,7 @@ exec::IExecutor *ExecutorFactory::createLinearExecutor(graph::Graph &graph)
   // Initialize constant tensors
   for (const auto backend : backend::BackendManager::get().getAll())
   {
-    linear->getBackendContext(backend)->constant_initializer->run();
+    graph.backend_resolver()->getBackendContext(backend)->constant_initializer->run();
   }
 
   function_sequence->iterate([&](exec::IFunction &ifunc) {
@@ -195,10 +193,9 @@ exec::IExecutor *ExecutorFactory::createLinearExecutor(graph::Graph &graph)
     tensor_mgrs->insert(tensor_builder->releaseTensorManager());
   }
 
-  return new exec::LinearExecutor{graph.shareModel(), linear->releaseSubgraphs(),
-                                  operand_context, linear->releaseLowerInfo(),
-                                  std::move(tensor_mgrs), linear->releaseElements(),
-                                  function_sequence};
+  return new exec::LinearExecutor{
+      graph.shareModel(), graph.releaseSubgraphs(), operand_context, graph.releaseLowerInfo(),
+      std::move(tensor_mgrs), linear->releaseElements(), function_sequence};
 }
 
 exec::IExecutor *ExecutorFactory::createDataflowExecutor(graph::Graph &graph, bool parallel)
diff --git a/runtime/neurun/core/src/compiler/Linear.cc b/runtime/neurun/core/src/compiler/Linear.cc
index db75893..901abbc 100644
--- a/runtime/neurun/core/src/compiler/Linear.cc
+++ b/runtime/neurun/core/src/compiler/Linear.cc
@@ -18,15 +18,11 @@
 
 #include "Linear.h"
 
-#include "graph/operation/LowerInfo.h"
-#include "graph/operand/LowerInfo.h"
 #include "backend/IShapeFixer.h"
 #include "backend/IConfig.h"
 #include "backend/IConstantInitializer.h"
 #include "backend/Backend.h"
 #include "compiler/SubTensorInfo.h"
-#include "model/OperandInfo.h"
-#include "model/OperandIndexMap.h"
 
 #include "util/logging.h"
@@ -35,18 +31,13 @@ namespace neurun
 namespace compiler
 {
 
-Linear::Linear(const std::shared_ptr<const model::Model> &model,
-               std::unique_ptr<model::Subgraphs> subgraphs,
-               std::unique_ptr<graph::LowerInfoMap> lower_info_map,
-               std::unique_ptr<compiler::BackendResolver> backend_resolver)
-    : _model(model), _subgraphs{std::move(subgraphs)}, _lower_info_map{std::move(lower_info_map)},
-      _backend_resolver{std::move(backend_resolver)}
+Linear::Linear(graph::Graph &graph) : _graph(graph)
 {
-  assert(_model && _subgraphs && _lower_info_map);
-
   // Get SubgraphSequence by topological sorting
   {
-    // _subgraphs can't access a subgraph by an operand so that input_to_subgs can offer it
+    model::Subgraphs &subgraphs = _graph.subgraphs();
+    model::Operands &operands = _graph.operands();
+    // subgraphs can't access a subgraph by an operand so that input_to_subgs can offer it
     std::unordered_map<model::OperandIndex, std::list<model::SubgraphIndex>> input_to_subgs;
 
     // Get the relations between input/subgraph to be used for dfs-post-iter
@@ -64,11 +55,11 @@ Linear::Linear(const std::shared_ptr<const model::Model> &model,
     //      [SUBG3]
     //        |
     //       [4]
-    _subgraphs->iterate([&](const model::SubgraphIndex &subg_idx, model::Subgraph &subg) {
+    subgraphs.iterate([&](const model::SubgraphIndex &subg_idx, model::Subgraph &subg) {
       for (auto input : subg.getInputs())
       {
         // only valid_inputs
-        const auto &operand = _model->operands.at(input);
+        const auto &operand = operands.at(input);
         if (operand.isConstant())
           continue;
@@ -86,7 +77,7 @@ Linear::Linear(const std::shared_ptr<const model::Model> &model,
     });
 
     std::unordered_map<model::SubgraphIndex, bool> visited;
-    _subgraphs->iterate([&](const model::SubgraphIndex &index, const model::Subgraph &) {
+    subgraphs.iterate([&](const model::SubgraphIndex &index, const model::Subgraph &) {
       visited[index] = false;
     });
@@ -105,16 +96,16 @@ Linear::Linear(const std::shared_ptr<const model::Model> &model,
           const auto &subg_index_list = it->second;
           for (const auto &index : subg_index_list)
           {
-            auto &subg = _subgraphs->at(index);
+            auto &subg = subgraphs.at(index);
             dfs_recursive(index, subg);
           }
         }
       }
-      _elements.emplace_back(&_subgraphs->at(index), getLowerInfo(index));
+      _elements.emplace_back(&subgraphs.at(index), _graph.getLowerInfo(index));
     };
 
-    _subgraphs->iterate(dfs_recursive);
+    subgraphs.iterate(dfs_recursive);
 
     // All of the nodes must have been visited.
     assert(
@@ -167,12 +158,12 @@ void Linear::planTensors()
   model::OperandIndexSequence constants;
 
   // Prepare scanning
-  _model->operands.iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
-    const auto lower_info = getLowerInfo(ind);
+  _graph.operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
+    const auto lower_info = _graph.getLowerInfo(ind);
     // TODO Remove if neurun doesn't support anymore such as
     // GeneratedTests.reshape_quant8_weights_as_inputs
     if (lower_info->def_factors().size() == 0 && lower_info->use_factors().size() == 0 &&
-        _model->inputs.contains(ind) == false)
+        !_graph.getInputs().contains(ind))
     {
       VERBOSE(LINEAR) << "Operand #" << ind.value() << " will be not used. no more process."
                       << std::endl;
@@ -188,11 +179,12 @@ void Linear::planTensors()
       constants.append(ind);
     }
 
+    model::Subgraphs &subgraphs = _graph.subgraphs();
     for (auto factor : lower_info->def_factors())
     {
       bool isSubTensor = false;
       auto backend = factor.backend();
-      auto tensor_builder = _backend_resolver->getBackendContext(backend)->tensor_builder;
+      auto tensor_builder = _graph.backend_resolver()->getBackendContext(backend)->tensor_builder;
 
       if (backend->config()->SupportSubTensorAlloc())
       {
@@ -215,11 +207,11 @@ void Linear::planTensors()
         // NOTE This assumes an operand can have one layout, and only Permutate can have
         // different layouts for input and output
         const auto &def = *obj.getDef().list().cbegin();
-        auto frontend_layout = _subgraphs->at(_subgraphs->getOperation(def)).getLayout();
+        auto frontend_layout = subgraphs.at(subgraphs.getOperation(def)).getLayout();
         if (frontend_layout == model::Layout::UNKNOWN)
         {
           const auto &use = *obj.getUses().list().cbegin();
-          frontend_layout = _subgraphs->at(_subgraphs->getOperation(use)).getLayout();
+          frontend_layout = subgraphs.at(subgraphs.getOperation(use)).getLayout();
         }
         const auto backend_layout = lower_info->def_factors().getOnlyElement().layout();
         tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout, is_const);
@@ -231,7 +223,7 @@ void Linear::planTensors()
 
   // If a tensor is model output, increase the use of the tensor.
   // This aim is same to above one.
-  for (const auto &ind : _model->outputs)
+  for (const auto &ind : _graph.getOutputs())
   {
     uses_map[ind]++;
   }
@@ -250,7 +242,7 @@ void Linear::planTensors()
 
   // Allocate Model's inputs
   VERBOSE(LINEAR) << "TENSORS as MODEL INPUT" << std::endl;
-  for (const auto &ind : _model->inputs)
+  for (const auto &ind : _graph.getInputs())
   {
     auto tensor_builder = tensor_builder_map[ind];
     if (!tensor_builder) // for GeneratedTests.xxx_weights_as_inputs
@@ -290,7 +282,7 @@ void Linear::planTensors()
   }
 
   // Dispose and validate
-  for (const auto &ind : _model->outputs)
+  for (const auto &ind : _graph.getOutputs())
   {
     --uses_map[ind];
     assert(uses_map[ind] == 0);
@@ -326,30 +318,11 @@ void Linear::generateConstantInitializers(void) const
   iterate([&](const compiler::Linear::Element &element) {
     auto backend = element.lower_info->backend();
-    auto constant_initializer = _backend_resolver->getBackendContext(backend)->constant_initializer;
-    constant_initializer->generate(*element.subgraph, _model->operands);
+    auto constant_initializer =
+        _graph.backend_resolver()->getBackendContext(backend)->constant_initializer;
+    constant_initializer->generate(*element.subgraph, _graph.operands());
   });
 }
 
-const graph::operation::LowerInfo *Linear::getLowerInfo(const model::SubgraphIndex &index) const
-{
-  if (!_lower_info_map)
-    return nullptr;
-  auto itr = _lower_info_map->operation.find(index);
-  if (itr == _lower_info_map->operation.end())
-    return nullptr;
-  return itr->second.get();
-}
-
-const graph::operand::LowerInfo *Linear::getLowerInfo(const model::OperandIndex &index) const
-{
-  if (!_lower_info_map)
-    return nullptr;
-  auto itr = _lower_info_map->operand.find(index);
-  if (itr == _lower_info_map->operand.end())
-    return nullptr;
-  return itr->second.get();
-}
-
 } // namespace compiler
 } // namespace neurun
diff --git a/runtime/neurun/core/src/compiler/Linear.h b/runtime/neurun/core/src/compiler/Linear.h
index c60a441..eb2ca2c 100644
--- a/runtime/neurun/core/src/compiler/Linear.h
+++ b/runtime/neurun/core/src/compiler/Linear.h
@@ -23,7 +23,7 @@
 #include "model/Model.h"
 #include "model/Subgraphs.h"
 #include "backend/ITensorBuilder.h"
-#include "graph/LowerInfoMap.h"
+#include "graph/Graph.h"
 #include "compiler/BackendResolver.h"
 
 namespace neurun
@@ -58,10 +58,7 @@ public:
   };
 
 public:
-  Linear(const std::shared_ptr<const model::Model> &model,
-         std::unique_ptr<model::Subgraphs> subgraphs,
-         std::unique_ptr<graph::LowerInfoMap> lower_info_map,
-         std::unique_ptr<compiler::BackendResolver> backend_resolver);
+  Linear(graph::Graph &graph);
 
 public:
   Linear(const Linear &linear) = delete;
@@ -75,31 +72,11 @@ public:
 
   void generateConstantInitializers(void) const;
 
-  std::unique_ptr<graph::LowerInfoMap> releaseLowerInfo() { return std::move(_lower_info_map); }
-  const graph::LowerInfoMap *getLowerInfo() { return _lower_info_map.get(); }
-
-  std::unique_ptr<model::Subgraphs> releaseSubgraphs() { return std::move(_subgraphs); }
-
   std::vector<Element> &&releaseElements() { return std::move(_elements); }
 
-  const backend::BackendContext *getBackendContext(const backend::Backend *backend)
-  {
-    return _backend_resolver->getBackendContext(backend);
-  }
-
-  const compiler::BackendResolver *backend_resolver() const { return _backend_resolver.get(); }
-
-private:
-  // TODO Replace these getLowerInfo methods with ones of LowerInfoMap in the future
-  const graph::operation::LowerInfo *getLowerInfo(const model::SubgraphIndex &index) const;
-  const graph::operand::LowerInfo *getLowerInfo(const model::OperandIndex &index) const;
-
 private:
-  std::shared_ptr<const model::Model> _model;
-  std::unique_ptr<model::Subgraphs> _subgraphs;
-  std::unique_ptr<graph::LowerInfoMap> _lower_info_map;
+  graph::Graph &_graph;
   std::vector<Element> _elements;
-  std::unique_ptr<compiler::BackendResolver> _backend_resolver;
 };
 
 } // namespace compiler
diff --git a/runtime/neurun/core/src/graph/Graph.cc b/runtime/neurun/core/src/graph/Graph.cc
index b3d70b1..91370b4 100644
--- a/runtime/neurun/core/src/graph/Graph.cc
+++ b/runtime/neurun/core/src/graph/Graph.cc
@@ -609,10 +609,5 @@ void Graph::setBackendResolver(std::unique_ptr<compiler::BackendResolver> &&br)
   _backend_resolver = std::move(br);
 }
 
-std::unique_ptr<compiler::BackendResolver> Graph::releaseBackendResolver()
-{
-  return std::move(_backend_resolver);
-}
-
 } // namespace graph
 } // namespace neurun
-- 
2.7.4
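
For readers skimming the diff, the essence of the change is an ownership pattern: before, `Graph::release*` methods moved `unique_ptr` members out of `Graph` into `Linear` and then into the executor, so ownership hopped between objects; after, `Graph` remains the single owner and `Linear` merely borrows it by reference. Below is a minimal standalone sketch contrasting the two styles. The types here (`Graph`, `Linear`, `BackendResolver`) are hypothetical reductions for illustration, not the real neurun classes:

```cpp
// Sketch of the ownership change in this patch, using simplified stand-in
// types. Compiles as a self-contained C++14 program.
#include <cassert>
#include <iostream>
#include <memory>
#include <string>

struct BackendResolver // stand-in for compiler::BackendResolver
{
  std::string name = "resolver";
};

class Graph // stand-in for graph::Graph
{
public:
  Graph() : _backend_resolver{std::unique_ptr<BackendResolver>(new BackendResolver)} {}

  // After this patch: Graph keeps ownership and only exposes access.
  const BackendResolver *backend_resolver() const { return _backend_resolver.get(); }

  // Before this patch: callers could steal the member, leaving Graph empty
  // and making the real owner hard to track.
  std::unique_ptr<BackendResolver> releaseBackendResolver()
  {
    return std::move(_backend_resolver);
  }

private:
  std::unique_ptr<BackendResolver> _backend_resolver;
};

class Linear // stand-in for compiler::Linear
{
public:
  // New style: borrow the graph; no ownership transfer.
  explicit Linear(Graph &graph) : _graph(graph) {}

  void run() const
  {
    // Every lookup goes through the graph, the single owner.
    std::cout << _graph.backend_resolver()->name << '\n';
  }

private:
  Graph &_graph; // must not outlive the Graph it refers to
};

int main()
{
  Graph graph;
  Linear linear{graph};
  linear.run(); // prints "resolver"; graph still owns its resolver
  assert(graph.backend_resolver() != nullptr);

  // Old style for contrast: after release*, the graph is hollowed out.
  auto stolen = graph.releaseBackendResolver();
  assert(graph.backend_resolver() == nullptr); // ownership has moved away
  (void)stolen;
  return 0;
}
```

The trade-off the commit accepts: holding `graph::Graph &` means `Linear` must not outlive the `Graph` it references. That is easy to guarantee here because, as the diff shows, `linear` is a short-lived local inside `ExecutorFactory::createLinearExecutor` while `graph` is passed in from the caller.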