From 2ec8a2716fb5ff99134eb1cbdc7c7cf6d3a0ad00 Mon Sep 17 00:00:00 2001
From: =?utf8?q?=EA=B9=80=EC=88=98=EC=A7=84/=EB=8F=99=EC=9E=91=EC=A0=9C?=
 =?utf8?q?=EC=96=B4Lab=28SR=29/Engineer/=EC=82=BC=EC=84=B1=EC=A0=84?=
 =?utf8?q?=EC=9E=90?= <sjsujin.kim@samsung.com>
Date: Tue, 4 Sep 2018 15:03:23 +0900
Subject: [PATCH] [neurun] Do BackendResolver in lowering (#2563)

* [neurun] Do BackendResolver in lowering

This commit moves backend resolution (BackendResolver) into the lowering
step of Graph.
- TensorBuilder no longer takes a Plan in its constructor.
- LowerInfo holds a Backend object instead of a backend id string.
- BackendResolver returns only the Backend object.

Signed-off-by: sjsujinkim <sjsujin.kim@samsung.com>

* Change backend() to getBackend()
---
 runtimes/neurun/src/backend/ITensorBuilder.h       |  4 +-
 .../neurun/src/backend/acl_cl/TensorBuilder.cc     |  7 ++-
 runtimes/neurun/src/backend/acl_cl/TensorBuilder.h |  7 +--
 runtimes/neurun/src/backend/cpu/TensorBuilder.cc   |  7 ++-
 runtimes/neurun/src/backend/cpu/TensorBuilder.h    |  7 +--
 runtimes/neurun/src/codegen/BackendResolver.cc     | 28 +--------
 runtimes/neurun/src/codegen/BackendResolver.h      | 17 +++---
 runtimes/neurun/src/codegen/IPlanBuilder.h         |  1 +
 runtimes/neurun/src/codegen/PlanBuilder.cc         | 15 +++--
 runtimes/neurun/src/codegen/PlanBuilder.h          |  7 ++-
 runtimes/neurun/src/codegen/Planner.cc             | 68 +++++++++++++++++++---
 runtimes/neurun/src/codegen/Planner.h              |  6 +-
 runtimes/neurun/src/compilation.cc                 |  8 +--
 runtimes/neurun/src/graph/Graph.cc                 |  6 +-
 runtimes/neurun/src/graph/operation/LowerInfo.cc   |  2 +-
 runtimes/neurun/src/graph/operation/LowerInfo.h    |  8 ++-
 runtimes/neurun/src/internal/BackendManager.cc     | 23 ++++++--
 runtimes/neurun/src/internal/BackendManager.h      | 22 ++++---
 runtimes/neurun/src/linear/Linear.cc               |  5 +-
 runtimes/neurun/src/linear/Linear.h                |  2 +-
 20 files changed, 152 insertions(+), 98 deletions(-)

diff --git a/runtimes/neurun/src/backend/ITensorBuilder.h b/runtimes/neurun/src/backend/ITensorBuilder.h
index e67fe06..8884ae3 100644
--- a/runtimes/neurun/src/backend/ITensorBuilder.h
+++ b/runtimes/neurun/src/backend/ITensorBuilder.h
@@ -5,6 +5,7 @@
 #include 
 
 #include "graph/operand/Index.h"
+#include "codegen/Plan.h"
 
 namespace neurun
 {
@@ -16,7 +17,8 @@ struct ITensorBuilder
   virtual ~ITensorBuilder(void) = default;
 
   virtual void mark(const ::neurun::graph::operand::Index &ind) = 0;
   // TODO Add an interface for adding subsumption info
-  virtual void prepare(const std::map &tensor_info_ctx) = 0;
+  virtual void prepare(codegen::Plan &plan,
+                       const std::map &tensor_info_ctx) = 0;
   virtual void allocate(void) = 0;
 };
diff --git a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc
index 87cec49..4ff2efd 100644
--- a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc
+++ b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc
@@ -11,7 +11,7 @@ namespace backend
 namespace acl_cl
 {
 
-TensorBuilder::TensorBuilder(codegen::Plan &plan) : _plan(plan)
+TensorBuilder::TensorBuilder()
 {
   // DO NOTHING
 }
@@ -23,7 +23,8 @@ void TensorBuilder::mark(const ::neurun::graph::operand::Index &ind)
   _inds.insert(ind.asInt());
 }
 
-void TensorBuilder::prepare(const std::map &tensor_info_ctx)
+void TensorBuilder::prepare(codegen::Plan &plan,
+                            const std::map &tensor_info_ctx)
 {
   assert(_tensors.size() == 0);
 
@@ -35,7 +36,7 @@ void TensorBuilder::prepare(const std::map &tens
     ::neurun::graph::operand::Index ind{ind_int};
     auto tensor = std::make_shared<::arm_compute::CLTensor>();
     tensor->allocator()->init(tensor_info_ctx.at(ind.asInt()));
-    _plan.operands().set(ind, std::make_shared(tensor));
+    plan.operands().set(ind, std::make_shared(tensor));
     _tensors[ind.asInt()] = tensor;
   }
 }
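Note: the first bullet of the commit message boils down to a lifetime change. A builder no longer captures a Plan reference at construction, so it can be created before any Plan exists, and the Plan it fills is whichever one is handed to prepare(). Below is a minimal, self-contained sketch of that pattern; Plan and TensorBuilder here are hypothetical reductions, not the repo's classes.

    #include <iostream>
    #include <map>
    #include <string>

    struct Plan // stand-in for codegen::Plan: just an operand table
    {
      std::map<int, std::string> operands;
    };

    class TensorBuilder // stand-in: holds no Plan member, unlike the old code
    {
    public:
      TensorBuilder() = default; // constructible before a Plan exists
      void prepare(Plan &plan, const std::map<int, std::string> &tensor_info_ctx)
      {
        // The Plan is supplied per call instead of being stored at construction
        for (const auto &it : tensor_info_ctx)
          plan.operands[it.first] = it.second;
      }
    };

    int main()
    {
      TensorBuilder builder; // created first...
      Plan plan;             // ...the Plan can come later
      builder.prepare(plan, {{0, "ifm"}, {1, "ofm"}});
      std::cout << plan.operands.size() << " operands registered\n";
    }

The same decoupling is what lets BackendManager construct both backends' builders without a Plan, as the later hunks show.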
diff --git a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h
index 4790da7..c593416 100644
--- a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h
+++ b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h
@@ -2,7 +2,6 @@
 #define __NEURUN_BACKEND_ACL_CL_TENSOR_BUILDER_H__
 
 #include "backend/ITensorBuilder.h"
-#include "codegen/Plan.h"
 
 #include 
 #include 
@@ -21,16 +20,16 @@ class Plan;
 class TensorBuilder : public ITensorBuilder
 {
 public:
-  TensorBuilder(codegen::Plan &plan);
+  TensorBuilder();
 
   virtual void mark(const ::neurun::graph::operand::Index &ind) override;
-  virtual void prepare(const std::map &tensor_info_ctx) override;
+  virtual void prepare(codegen::Plan &plan,
+                       const std::map &tensor_info_ctx) override;
   virtual void allocate(void) override;
 
   std::shared_ptr<::arm_compute::CLTensor> at(const ::neurun::graph::operand::Index &ind);
 
 private:
-  codegen::Plan &_plan;
   std::unordered_set _inds;
   std::unordered_map> _tensors;
 };
diff --git a/runtimes/neurun/src/backend/cpu/TensorBuilder.cc b/runtimes/neurun/src/backend/cpu/TensorBuilder.cc
index 3b7f446..f25a954 100644
--- a/runtimes/neurun/src/backend/cpu/TensorBuilder.cc
+++ b/runtimes/neurun/src/backend/cpu/TensorBuilder.cc
@@ -11,7 +11,7 @@ namespace backend
 namespace cpu
 {
 
-TensorBuilder::TensorBuilder(codegen::Plan &plan) : _plan(plan)
+TensorBuilder::TensorBuilder()
 {
   // DO NOTHING
 }
@@ -23,7 +23,8 @@ void TensorBuilder::mark(const ::neurun::graph::operand::Index &ind)
   _inds.insert(ind.asInt());
 }
 
-void TensorBuilder::prepare(const std::map &tensor_info_ctx)
+void TensorBuilder::prepare(codegen::Plan &plan,
+                            const std::map &tensor_info_ctx)
 {
   assert(_tensors.size() == 0);
 
@@ -33,7 +34,7 @@ void TensorBuilder::prepare(const std::map &tens
     auto tensor = std::make_shared(tensor_info_ctx.at(ind.asInt()));
     // TODO Fix allocation here. When Tensor object is created the memory for tensor is also
     // allocated, and this must be fixed.
-    _plan.operands().set(ind, std::make_shared(tensor));
+    plan.operands().set(ind, std::make_shared(tensor));
    _tensors[ind.asInt()] = tensor;
   }
 }
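Note: the TODO in the cpu builder above flags that prepare() and allocate() are not yet truly separate phases, because constructing a cpu Tensor allocates its memory immediately. A hypothetical sketch of the shape-then-allocate split the TODO asks for; the Tensor here is a simplified stand-in, not the repo's class, and this is one possible direction, not what the patch implements.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    class Tensor
    {
    public:
      explicit Tensor(std::size_t byte_size) : _byte_size(byte_size)
      {
        // record the size only; no memory is touched here
      }
      void allocate() { _data.resize(_byte_size); } // memory appears only now
      bool allocated() const { return !_data.empty(); }

    private:
      std::size_t _byte_size;
      std::vector<unsigned char> _data;
    };

    int main()
    {
      Tensor t{64};
      std::cout << t.allocated() << "\n"; // 0: prepared, not yet backed
      t.allocate();
      std::cout << t.allocated() << "\n"; // 1: backed after allocate()
    }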
diff --git a/runtimes/neurun/src/backend/cpu/TensorBuilder.h b/runtimes/neurun/src/backend/cpu/TensorBuilder.h
index 484a3e5..80fa12f 100644
--- a/runtimes/neurun/src/backend/cpu/TensorBuilder.h
+++ b/runtimes/neurun/src/backend/cpu/TensorBuilder.h
@@ -6,7 +6,6 @@
 
 #include "backend/ITensorBuilder.h"
 #include "backend/cpu/operand/Tensor.h"
-#include "codegen/Plan.h"
 
 namespace neurun
 {
@@ -20,16 +19,16 @@ class Plan;
 class TensorBuilder : public ITensorBuilder
 {
 public:
-  TensorBuilder(codegen::Plan &plan);
+  TensorBuilder();
 
   virtual void mark(const ::neurun::graph::operand::Index &ind) override;
-  virtual void prepare(const std::map &tensor_info_ctx) override;
+  virtual void prepare(codegen::Plan &plan,
+                       const std::map &tensor_info_ctx) override;
   virtual void allocate(void) override;
 
   std::shared_ptr at(const ::neurun::graph::operand::Index &ind);
 
 private:
-  codegen::Plan &_plan;
   std::unordered_set _inds;
   std::unordered_map> _tensors;
 };
diff --git a/runtimes/neurun/src/codegen/BackendResolver.cc b/runtimes/neurun/src/codegen/BackendResolver.cc
index eb7a5a8..17f5dcf 100644
--- a/runtimes/neurun/src/codegen/BackendResolver.cc
+++ b/runtimes/neurun/src/codegen/BackendResolver.cc
@@ -5,33 +5,7 @@ namespace neurun
 namespace codegen
 {
 
-std::shared_ptr
-BackendResolver::getInitializerGenerator(const std::type_index &type)
-{
-  return _gen_map.at(type).initializer_gen;
-}
-
-std::shared_ptr
-BackendResolver::getStageGenerator(const std::type_index &type)
-{
-  return _gen_map.at(type).stage_gen;
-}
-
-std::shared_ptr
-BackendResolver::getTensorBuilder(const std::type_index &type)
-{
-  return getStageGenerator(type)->tensor_builder();
-}
-
-std::set> BackendResolver::getAllTensorBuilders()
-{
-  std::set> ret;
-  for (const auto &it : _gen_map)
-  {
-    ret.insert(it.second.stage_gen->tensor_builder());
-  }
-  return ret;
-}
+// NOT IMPLEMENTED
 
 } // namespace neurun
 } // namespace codegen
diff --git a/runtimes/neurun/src/codegen/BackendResolver.h b/runtimes/neurun/src/codegen/BackendResolver.h
index 8023efa..25c854a 100644
--- a/runtimes/neurun/src/codegen/BackendResolver.h
+++ b/runtimes/neurun/src/codegen/BackendResolver.h
@@ -19,8 +19,10 @@ namespace codegen
 class BackendResolver
 {
 public:
-  BackendResolver(::internal::BackendManager &backend_manager) : _backend_manager(backend_manager)
+  BackendResolver(const neurun::graph::operand::Set &operands)
   {
+    _backend_manager = std::make_shared<::internal::BackendManager>(operands);
+
     const auto &backend_all_str =
         ::nnfw::util::EnvVar{std::string("OP_BACKEND_ALLOPS")}.asString("none");
     if (backend_all_str.compare("none") != 0)
@@ -28,7 +30,7 @@
       VERBOSE(BackendResolver) << "Use backend for all ops: " << backend_all_str << std::endl;
 #define OP(InternalName, NnApiName)                                   \
   {                                                                   \
-    auto backend = _backend_manager.get(backend_all_str);             \
+    auto backend = _backend_manager->get(backend_all_str);            \
     _gen_map[typeid(graph::operation::InternalName::Node)] = backend; \
   }
 #include "graph/operation/Op.lst"
@@ -40,7 +42,7 @@
   {                                                                                       \
     const auto &backend_str =                                                             \
         ::nnfw::util::EnvVar{std::string("OP_BACKEND_") + #NnApiName}.asString("acl_cl"); \
-    auto backend = _backend_manager.get(backend_str);                                     \
+    auto backend = _backend_manager->get(backend_str);                                    \
     VERBOSE(BackendResolver) << "backend for " << #NnApiName << ": " << backend_str << std::endl; \
     _gen_map[typeid(graph::operation::InternalName::Node)] = backend;                     \
   }
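Note: the constructor above fills _gen_map keyed by std::type_index, so resolution happens once per operation type, not per node instance, and the OP_BACKEND_ALLOPS / OP_BACKEND_* environment variables steer the mapping. A self-contained sketch of that dispatch mechanism, with hypothetical stand-in Backend and node types:

    #include <iostream>
    #include <string>
    #include <typeindex>
    #include <unordered_map>

    struct Backend { std::string id; }; // stand-in for ::internal::Backend
    struct Conv2DNode {};               // stand-ins for graph::operation nodes
    struct SoftmaxNode {};

    int main()
    {
      std::unordered_map<std::type_index, Backend> gen_map;
      gen_map[typeid(Conv2DNode)] = Backend{"acl_cl"};
      gen_map[typeid(SoftmaxNode)] = Backend{"cpu"};

      Conv2DNode node;
      // typeid(node) keys the table the same way getBackend() does below
      std::cout << gen_map[typeid(node)].id << "\n"; // prints "acl_cl"
    }

For example, running with OP_BACKEND_ALLOPS=cpu routes every operation to the cpu backend, while the per-op variables fall back to acl_cl by default.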
@@ -50,15 +52,12 @@ public:
     }
   }
 
-  std::shared_ptr
-  getInitializerGenerator(const std::type_index &type);
-  std::shared_ptr getStageGenerator(const std::type_index &type);
-  std::shared_ptr getTensorBuilder(const std::type_index &type);
-  std::set> getAllTensorBuilders();
+public:
+  const ::internal::Backend &getBackend(const std::type_index &type) { return _gen_map[type]; }
 
 private:
   std::unordered_map _gen_map;
-  ::internal::BackendManager &_backend_manager;
+  std::shared_ptr<::internal::BackendManager> _backend_manager;
 };
diff --git a/runtimes/neurun/src/codegen/IPlanBuilder.h b/runtimes/neurun/src/codegen/IPlanBuilder.h
index fc9cc36..1a6dd9d 100644
--- a/runtimes/neurun/src/codegen/IPlanBuilder.h
+++ b/runtimes/neurun/src/codegen/IPlanBuilder.h
@@ -16,6 +16,7 @@ struct IPlanBuilder
 
   virtual void addShapeConstr(const ::neurun::graph::operand::Index &ind,
                               const ::arm_compute::TensorInfo &info) = 0;
+  virtual void addTensorBuilder(std::shared_ptr tensor_builder) = 0;
   virtual void addInitializer(const ::neurun::graph::operand::Index &ind,
                               const Initializer &initializer) = 0;
   virtual void addStage(const Stage &) = 0;
diff --git a/runtimes/neurun/src/codegen/PlanBuilder.cc b/runtimes/neurun/src/codegen/PlanBuilder.cc
index 5471218..cba9046 100644
--- a/runtimes/neurun/src/codegen/PlanBuilder.cc
+++ b/runtimes/neurun/src/codegen/PlanBuilder.cc
@@ -11,6 +11,11 @@ void PlanBuilder::addShapeConstr(const ::neurun::graph::operand::Index &ind,
   _tensor_info_ctx[ind.asInt()] = info;
 }
 
+void PlanBuilder::addTensorBuilder(std::shared_ptr tensor_builder)
+{
+  _tensor_builders.insert(tensor_builder);
+}
+
 void PlanBuilder::addInitializer(const ::neurun::graph::operand::Index &ind,
                                 const Initializer &initializer)
 {
@@ -19,14 +24,12 @@ void PlanBuilder::addInitializer(const ::neurun::graph::operand::Index &ind,
 
 void PlanBuilder::addStage(const Stage &stage) { _stages.emplace_back(stage); }
 
-void PlanBuilder::finalize(BackendResolver &backend_resolver)
+void PlanBuilder::finalize()
 {
-  auto tensor_builders = backend_resolver.getAllTensorBuilders();
-
   // Prepare tensors
-  for (auto &tensor_builder : tensor_builders)
+  for (auto &tensor_builder : _tensor_builders)
   {
-    tensor_builder->prepare(_tensor_info_ctx);
+    tensor_builder->prepare(_plan, _tensor_info_ctx);
   }
 
   // Process Stage
@@ -39,7 +42,7 @@ void PlanBuilder::finalize(BackendResolver &backend_resolver)
 
   // TODO Add code for CPU/ACL tensor allocation
   // Allocate Tensor Memory for cl_tensors
-  for (auto &tensor_builder : tensor_builders)
+  for (auto &tensor_builder : _tensor_builders)
   {
     tensor_builder->allocate();
   }
diff --git a/runtimes/neurun/src/codegen/PlanBuilder.h b/runtimes/neurun/src/codegen/PlanBuilder.h
index 2f848e2..d074abf 100644
--- a/runtimes/neurun/src/codegen/PlanBuilder.h
+++ b/runtimes/neurun/src/codegen/PlanBuilder.h
@@ -5,6 +5,7 @@
 #include "codegen/Plan.h"
 #include "codegen/BackendResolver.h"
 #include "backend/IStageGenerator.h"
+#include "backend/ITensorBuilder.h"
 
 namespace neurun
 {
@@ -42,6 +43,9 @@ public:
                       const ::arm_compute::TensorInfo &info) override;
 
 public:
+  void addTensorBuilder(std::shared_ptr tensor_builder) override;
+
+public:
   void addInitializer(const ::neurun::graph::operand::Index &ind,
                       const Initializer &initializer) override;
 
@@ -49,7 +53,7 @@ public:
   void addStage(const Stage &stage) override;
 
 public:
-  void finalize(BackendResolver &backend_resolver);
+  void finalize();
 
 public:
   const std::map &tensor_info_ctx() { return _tensor_info_ctx; }
 
@@ -60,6 +64,7 @@ private:
 
 private:
   std::map _tensor_info_ctx;
   std::map _initializer_ctx;
+  std::set> _tensor_builders;
   std::vector _stages;
 };
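Note: _tensor_builders is deliberately a std::set of shared pointers. Every visited operation reports its backend's tensor builder, most operations share one of a handful of builders, and finalize() must prepare and allocate each builder exactly once. A small self-contained sketch of that deduplication, with a stand-in type:

    #include <iostream>
    #include <memory>
    #include <set>

    struct TensorBuilder {}; // stand-in for backend::ITensorBuilder

    int main()
    {
      auto acl = std::make_shared<TensorBuilder>();
      auto cpu = std::make_shared<TensorBuilder>();

      std::set<std::shared_ptr<TensorBuilder>> builders;
      builders.insert(acl); // op #1 on acl_cl
      builders.insert(acl); // op #2 on acl_cl: duplicate, ignored by the set
      builders.insert(cpu); // op #3 on cpu

      std::cout << builders.size() << "\n"; // 2: one entry per distinct builder
    }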
diff --git a/runtimes/neurun/src/codegen/Planner.cc b/runtimes/neurun/src/codegen/Planner.cc
index 53facf4..8d0bf3a 100644
--- a/runtimes/neurun/src/codegen/Planner.cc
+++ b/runtimes/neurun/src/codegen/Planner.cc
@@ -6,6 +6,7 @@
 #include "graph/operand/Set.h"
 #include "codegen/IPlanBuilder.h"
 #include "codegen/BackendResolver.h"
+#include "graph/operation/LowerInfo.h"
 
 namespace neurun
 {
@@ -31,14 +32,21 @@ void Planner::visit(const graph::operation::Conv2D::Implicit::Node &node)
   _builder.addShapeConstr(ker_index, ::internal::asTensorInfo(ker_shape));
   _builder.addShapeConstr(bias_index, ::internal::asTensorInfo(bias_size));
 
+  // backend
+  auto backend = node.lower_info()->backend();
+
   // Generate Initializers
-  auto init_gen = _backend_resolver.getInitializerGenerator(typeid(node));
+  auto init_gen = backend.initializer_gen();
   _builder.addInitializer(ker_index, init_gen->generateWeight(node));
   _builder.addInitializer(bias_index, init_gen->generateBias(node));
 
   // Generate Stage
-  auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+  auto stage_gen = backend.stage_gen();
   _builder.addStage(stage_gen->generate(node));
+
+  // Generate TensorBuilder
+  auto tensor_builder = backend.tensor_builder();
+  _builder.addTensorBuilder(tensor_builder);
 }
 
 void Planner::visit(const graph::operation::MaxPool2D::Implicit::Node &node)
@@ -53,9 +61,16 @@
   _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
   _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
 
+  // backend
+  auto backend = node.lower_info()->backend();
+
   // Generate Stage
-  auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+  auto stage_gen = backend.stage_gen();
   _builder.addStage(stage_gen->generate(node));
+
+  // Generate TensorBuilder
+  auto tensor_builder = backend.tensor_builder();
+  _builder.addTensorBuilder(tensor_builder);
 }
 
 void Planner::visit(const graph::operation::AvgPool2D::Implicit::Node &node)
@@ -70,9 +85,16 @@
   _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
   _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
 
+  // backend
+  auto backend = node.lower_info()->backend();
+
   // Generate Stage
-  auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+  auto stage_gen = backend.stage_gen();
   _builder.addStage(stage_gen->generate(node));
+
+  // Generate TensorBuilder
+  auto tensor_builder = backend.tensor_builder();
+  _builder.addTensorBuilder(tensor_builder);
 }
 
 void Planner::visit(const graph::operation::Concat::Node &node)
@@ -99,9 +121,16 @@
     _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
   }
 
+  // backend
+  auto backend = node.lower_info()->backend();
+
   // Generate Stage
-  auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+  auto stage_gen = backend.stage_gen();
   _builder.addStage(stage_gen->generate(node));
+
+  // Generate TensorBuilder
+  auto tensor_builder = backend.tensor_builder();
+  _builder.addTensorBuilder(tensor_builder);
 }
 
 void Planner::visit(const graph::operation::FullyConnected::Node &node)
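Note: every visit() overload in this file now ends with the same three-step tail: read the node's LowerInfo backend, add its stage, and register its tensor builder. A hypothetical helper the repetition could later be folded into; the name and shape are this note's invention, not part of the patch, and it assumes every NodeT exposes lower_info() and that the backend's stage generator has a generate() overload for NodeT.

    template <typename NodeT>
    void addBackendArtifacts(const NodeT &node, neurun::codegen::IPlanBuilder &builder)
    {
      auto backend = node.lower_info()->backend();
      builder.addStage(backend.stage_gen()->generate(node)); // kernel stage
      builder.addTensorBuilder(backend.tensor_builder());    // backend's builder
    }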
@@ -137,14 +166,21 @@
                           ::internal::asTensorInfo(num_output /*H*/, input_size /*W*/));
   _builder.addShapeConstr(bias_index, ::internal::asTensorInfo(bias_size));
 
+  // backend
+  auto backend = node.lower_info()->backend();
+
   // Generate Initializers
-  auto init_gen = _backend_resolver.getInitializerGenerator(typeid(node));
+  auto init_gen = backend.initializer_gen();
   _builder.addInitializer(weight_index, init_gen->generateWeight(node));
   _builder.addInitializer(bias_index, init_gen->generateBias(node));
 
   // Generate Stage
-  auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+  auto stage_gen = backend.stage_gen();
   _builder.addStage(stage_gen->generate(node));
+
+  // Generate TensorBuilder
+  auto tensor_builder = backend.tensor_builder();
+  _builder.addTensorBuilder(tensor_builder);
 }
 
 void Planner::visit(const graph::operation::Reshape::Node &node)
@@ -172,9 +208,16 @@
   _builder.addShapeConstr(output_index, ::internal::asTensorInfo(out_size));
   _builder.addShapeConstr(input_index, ::internal::asTensorInfo(ifm_shape));
 
+  // backend
+  auto backend = node.lower_info()->backend();
+
   // Generate Stage
-  auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+  auto stage_gen = backend.stage_gen();
   _builder.addStage(stage_gen->generate(node));
+
+  // Generate TensorBuilder
+  auto tensor_builder = backend.tensor_builder();
+  _builder.addTensorBuilder(tensor_builder);
 }
 
 void Planner::visit(const graph::operation::Softmax::Node &node)
@@ -197,9 +240,16 @@
   _builder.addShapeConstr(output_index, ::internal::asTensorInfo(len));
   _builder.addShapeConstr(input_index, ::internal::asTensorInfo(len));
 
+  // backend
+  auto backend = node.lower_info()->backend();
+
   // Generate Stage
-  auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+  auto stage_gen = backend.stage_gen();
   _builder.addStage(stage_gen->generate(node));
+
+  // Generate TensorBuilder
+  auto tensor_builder = backend.tensor_builder();
+  _builder.addTensorBuilder(tensor_builder);
 }
 
 void Planner::visit(const graph::operation::NOP::Node & /* node */)
diff --git a/runtimes/neurun/src/codegen/Planner.h b/runtimes/neurun/src/codegen/Planner.h
index cad2f2e..05e4f44 100644
--- a/runtimes/neurun/src/codegen/Planner.h
+++ b/runtimes/neurun/src/codegen/Planner.h
@@ -25,9 +25,8 @@ class BackendResolver;
 class Planner : public graph::operation::NodeVisitor
 {
 public:
-  Planner(const neurun::graph::operand::Set &ctx, neurun::codegen::IPlanBuilder &builder,
-          neurun::codegen::BackendResolver &backend_resolver)
-      : _ctx{ctx}, _builder{builder}, _backend_resolver(backend_resolver)
+  Planner(const neurun::graph::operand::Set &ctx, neurun::codegen::IPlanBuilder &builder)
+      : _ctx{ctx}, _builder{builder}
   {
   }
 
@@ -44,7 +43,6 @@ public:
 private:
   const neurun::graph::operand::Set &_ctx;
   neurun::codegen::IPlanBuilder &_builder;
-  neurun::codegen::BackendResolver &_backend_resolver;
 };
 
 } // namespace codegen
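Note: with the Planner constructor slimmed down, the finish() pipeline no longer mentions BackendResolver at all; resolution already happened during Graph::lower(). Condensed from the compilation.cc hunk below (plan, linear, and operands come from the surrounding function; this is a summary of the new flow, not the literal file):

    neurun::codegen::PlanBuilder plan_builder{plan};

    linear->markTensors();                                            // walks per-node LowerInfo
    linear->accept(neurun::codegen::Planner{operands, plan_builder}); // no resolver argument
    plan_builder.finalize();                                          // prepare + allocate tensors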
diff --git a/runtimes/neurun/src/compilation.cc b/runtimes/neurun/src/compilation.cc
index 7e9bff6..1e1cf3f 100644
--- a/runtimes/neurun/src/compilation.cc
+++ b/runtimes/neurun/src/compilation.cc
@@ -38,16 +38,14 @@ int ANeuralNetworksCompilation::finish()
   // Dump ops
   linear->accept(neurun::codegen::Dumper{});
 
-  ::internal::BackendManager backend_manager{plan};
-  neurun::codegen::BackendResolver backend_resolver{backend_manager};
   neurun::codegen::PlanBuilder plan_builder{plan};
 
-  linear->markTensors(backend_resolver);
+  linear->markTensors();
 
-  linear->accept(neurun::codegen::Planner{operands, plan_builder, backend_resolver});
+  linear->accept(neurun::codegen::Planner{operands, plan_builder});
 
   // TODO Add optimization passes
 
-  plan_builder.finalize(backend_resolver);
+  plan_builder.finalize();
 
   return ANEURALNETWORKS_NO_ERROR;
 }
diff --git a/runtimes/neurun/src/graph/Graph.cc b/runtimes/neurun/src/graph/Graph.cc
index ef97c84..531ef6d 100644
--- a/runtimes/neurun/src/graph/Graph.cc
+++ b/runtimes/neurun/src/graph/Graph.cc
@@ -8,6 +8,7 @@
 #include "nnfw/std/memory.h"
 #include "linear/Linear.h"
 #include "operation/LowerInfo.h"
+#include "codegen/BackendResolver.h"
 
 namespace neurun
 {
@@ -110,9 +111,10 @@ void Graph::lower(void)
 
   // Lower
   {
+    auto _backend_resolver = neurun::codegen::BackendResolver(_operands);
     _operations.iterate([&](const operation::Index &, operation::Node &node) {
-      // TODO Update backend id accordingly. Currently "acl_cl" by default
-      node.lower_info(nnfw::make_unique(std::string("acl_cl")));
+      auto backend = _backend_resolver.getBackend(typeid(node));
+      node.lower_info(nnfw::make_unique(backend));
     });
   }
diff --git a/runtimes/neurun/src/graph/operation/LowerInfo.cc b/runtimes/neurun/src/graph/operation/LowerInfo.cc
index c59bd1c..e8c19c8 100644
--- a/runtimes/neurun/src/graph/operation/LowerInfo.cc
+++ b/runtimes/neurun/src/graph/operation/LowerInfo.cc
@@ -7,7 +7,7 @@ namespace graph
 namespace operation
 {
 
-LowerInfo::LowerInfo(const std::string &backend_id) : _backend_id(backend_id)
+LowerInfo::LowerInfo(const internal::Backend &backend) : _backend(backend)
 {
   // DO NOTHING
 }
diff --git a/runtimes/neurun/src/graph/operation/LowerInfo.h b/runtimes/neurun/src/graph/operation/LowerInfo.h
index c524b00..56d1aa1 100644
--- a/runtimes/neurun/src/graph/operation/LowerInfo.h
+++ b/runtimes/neurun/src/graph/operation/LowerInfo.h
@@ -3,6 +3,8 @@
 
 #include 
 
+#include "internal/BackendManager.h"
+
 namespace neurun
 {
 namespace graph
@@ -13,11 +15,11 @@ namespace operation
 class LowerInfo
 {
 public:
-  LowerInfo(const std::string &backend_id);
-  const std::string &backend_id() const { return _backend_id; }
+  LowerInfo(const internal::Backend &backend);
+  const internal::Backend &backend() const { return _backend; }
 
 private:
-  const std::string _backend_id;
+  internal::Backend _backend;
 };
 
 } // namespace operation
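Note: the Graph.cc and LowerInfo changes above are the heart of the patch: lowering builds a BackendResolver locally and tags each node with a full Backend object instead of a hard-coded "acl_cl" string. Condensed from the Graph.cc hunk; the LowerInfo template argument on make_unique is written out here as this note's assumption:

    auto resolver = neurun::codegen::BackendResolver(_operands);
    _operations.iterate([&](const operation::Index &, operation::Node &node) {
      auto backend = resolver.getBackend(typeid(node)); // per-op-type resolution
      node.lower_info(nnfw::make_unique<operation::LowerInfo>(backend));
    });

Because LowerInfo stores the Backend by value, later passes such as Linear::markTensors() can reach the backend's generators straight from the node, without a resolver in hand.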
diff --git a/runtimes/neurun/src/internal/BackendManager.cc b/runtimes/neurun/src/internal/BackendManager.cc
index 292b751..e2781ce 100644
--- a/runtimes/neurun/src/internal/BackendManager.cc
+++ b/runtimes/neurun/src/internal/BackendManager.cc
@@ -15,20 +15,33 @@ namespace internal
 Backend::Backend(const std::shared_ptr &backend_initializer,
                  const std::shared_ptr &initializer_gen,
                  const std::shared_ptr &stage_gen)
-    : initializer_gen(initializer_gen), stage_gen(stage_gen)
+    : _initializer_gen(initializer_gen), _stage_gen(stage_gen)
 {
   backend_initializer->initialize();
 }
 
-BackendManager::BackendManager(neurun::codegen::Plan &plan) : _plan(plan)
+const std::shared_ptr Backend::initializer_gen() const
 {
-  const auto &operands = _plan.model().operands();
+  return _initializer_gen;
+}
+
+const std::shared_ptr Backend::stage_gen() const
+{
+  return _stage_gen;
+}
 
+const std::shared_ptr Backend::tensor_builder() const
+{
+  return _stage_gen->tensor_builder();
+}
+
+BackendManager::BackendManager(const neurun::graph::operand::Set &operands)
+{
   // Add arm_compute backend
   {
     using namespace ::neurun::backend::acl_cl;
     auto acl_backend_initializer = std::make_shared();
-    auto acl_tensor_builder = std::make_shared(_plan);
+    auto acl_tensor_builder = std::make_shared();
     auto acl_initializer_gen = std::make_shared(operands);
     auto acl_stage_gen = std::make_shared(operands, acl_tensor_builder);
@@ -40,7 +53,7 @@ BackendManager::BackendManager(neurun::codegen::Plan &plan) : _plan(plan)
   {
     using namespace ::neurun::backend::cpu;
     auto cpu_backend_initializer = std::make_shared();
-    auto cpu_tensor_builder = std::make_shared(_plan);
+    auto cpu_tensor_builder = std::make_shared();
     auto cpu_initializer_gen = std::make_shared(operands);
     auto cpu_stage_gen = std::make_shared(operands, cpu_tensor_builder);
diff --git a/runtimes/neurun/src/internal/BackendManager.h b/runtimes/neurun/src/internal/BackendManager.h
index a5f51db..1585cce 100644
--- a/runtimes/neurun/src/internal/BackendManager.h
+++ b/runtimes/neurun/src/internal/BackendManager.h
@@ -3,7 +3,7 @@
 
 #include 
 
-#include "codegen/Plan.h"
+#include "graph/operand/Set.h"
 
 namespace neurun
 {
@@ -21,30 +21,36 @@ struct ITensorBuilder;
 namespace internal
 {
 
-struct Backend
+class Backend
 {
-  std::shared_ptr initializer_gen;
-  std::shared_ptr stage_gen;
-
+public:
   Backend(const std::shared_ptr &backend_initializer,
           const std::shared_ptr &initializer_gen,
           const std::shared_ptr &stage_gen);
 
-  Backend(void) : initializer_gen(nullptr), stage_gen(nullptr)
+  Backend(void) : _initializer_gen(nullptr), _stage_gen(nullptr)
   {
     // DO NOTHING
   }
+
+public:
+  const std::shared_ptr initializer_gen() const;
+  const std::shared_ptr stage_gen() const;
+  const std::shared_ptr tensor_builder() const;
+
+private:
+  std::shared_ptr _initializer_gen;
+  std::shared_ptr _stage_gen;
 };
 
 class BackendManager
 {
 public:
-  BackendManager(neurun::codegen::Plan &plan);
+  BackendManager(const neurun::graph::operand::Set &operands);
 
   Backend get(const std::string &key);
 
 private:
-  neurun::codegen::Plan &_plan;
   std::map _gen_map;
 };
diff --git a/runtimes/neurun/src/linear/Linear.cc b/runtimes/neurun/src/linear/Linear.cc
index a07ceda..e898b73 100644
--- a/runtimes/neurun/src/linear/Linear.cc
+++ b/runtimes/neurun/src/linear/Linear.cc
@@ -3,6 +3,7 @@
 
 #include "graph/Graph.h"
 #include "codegen/BackendResolver.h"
+#include "graph/operation/LowerInfo.h"
 
 namespace neurun
 {
@@ -32,11 +33,11 @@ void Linear::accept(graph::operation::NodeVisitor &&visitor) const
   }
 }
 
-void Linear::markTensors(neurun::codegen::BackendResolver &resolver) const
+void Linear::markTensors() const
 {
   for (const auto op : _operations)
   {
-    auto tensor_builder = resolver.getTensorBuilder(typeid(*op));
+    const auto tensor_builder = op->lower_info()->backend().stage_gen()->tensor_builder();
     for (const auto &ind : op->getInputs())
     {
       tensor_builder->mark(ind);
diff --git a/runtimes/neurun/src/linear/Linear.h b/runtimes/neurun/src/linear/Linear.h
index a8a9a3e..574a29b 100644
--- a/runtimes/neurun/src/linear/Linear.h
+++ b/runtimes/neurun/src/linear/Linear.h
@@ -49,7 +49,7 @@ public:
   void accept(graph::operation::NodeVisitor &&visitor) const;
 
   // TODO Remove this since tensor marking will be replaced with another way
-  virtual void markTensors(neurun::codegen::BackendResolver &) const;
+  virtual void markTensors() const;
 
 public:
 private:
-- 
2.7.4