[neurun] Do BackendResolver in lowering (#2563)
author 김수진/동작제어Lab(SR)/Engineer/삼성전자 <sjsujin.kim@samsung.com>
Tue, 4 Sep 2018 06:03:23 +0000 (15:03 +0900)
committer 오형석/동작제어Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Tue, 4 Sep 2018 06:03:23 +0000 (15:03 +0900)
* [neurun] Do BackendResolver in lowering

This commit moves backend resolution (BackendResolver) into the lowering phase of Graph.

- TensorBuilder no longer takes a Plan in its constructor.
- LowerInfo holds a Backend object instead of a backend-id string.
- BackendResolver now exposes only the resolved Backend.

A minimal sketch of the resulting flow follows this message.

Signed-off-by: sjsujinkim <sjsujin.kim@samsung.com>
* Change backend() to getBackend()
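
For context, a minimal, self-contained sketch of the flow this change introduces: the resolver hands each operation a Backend during lowering, and later phases read generators off the node's LowerInfo instead of querying the resolver. The classes below are heavily simplified stand-ins for the real neurun types; only the names getBackend(), backend(), and tensor_builder() come from this diff, everything else (ConvNode, the stub members) is illustrative.

    // Sketch only -- not the real neurun headers.
    #include <iostream>
    #include <memory>
    #include <typeindex>
    #include <typeinfo>
    #include <unordered_map>

    struct ITensorBuilder { virtual ~ITensorBuilder() = default; };

    // Backend owns its generators and exposes them via getters,
    // rather than as public members.
    class Backend
    {
    public:
      explicit Backend(std::shared_ptr<ITensorBuilder> tb = nullptr) : _tensor_builder(tb) {}
      std::shared_ptr<ITensorBuilder> tensor_builder() const { return _tensor_builder; }
    private:
      std::shared_ptr<ITensorBuilder> _tensor_builder;
    };

    // LowerInfo stores a Backend object, not a backend-id string.
    class LowerInfo
    {
    public:
      explicit LowerInfo(const Backend &backend) : _backend(backend) {}
      const Backend &backend() const { return _backend; }
    private:
      Backend _backend;
    };

    // BackendResolver maps an operation type to its Backend;
    // getBackend() is now its only query.
    class BackendResolver
    {
    public:
      void set(const std::type_index &type, const Backend &b) { _gen_map[type] = b; }
      const Backend &getBackend(const std::type_index &type) { return _gen_map[type]; }
    private:
      std::unordered_map<std::type_index, Backend> _gen_map;
    };

    struct ConvNode {}; // illustrative operation node

    int main()
    {
      BackendResolver resolver;
      resolver.set(typeid(ConvNode), Backend{std::make_shared<ITensorBuilder>()});

      // Lowering: each node gets a LowerInfo carrying its resolved Backend.
      ConvNode node;
      auto lower_info = std::make_unique<LowerInfo>(resolver.getBackend(typeid(node)));

      // Later phases (Planner, Linear) only consult the node's LowerInfo:
      auto tensor_builder = lower_info->backend().tensor_builder();
      std::cout << std::boolalpha << (tensor_builder != nullptr) << std::endl; // true
      return 0;
    }

The design point: once lowering has run, no later phase needs the resolver itself; Planner and Linear read everything they need from node.lower_info()->backend(), which is what lets the resolver-passing parameters disappear from Planner, PlanBuilder::finalize(), and Linear::markTensors() in the hunks below.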

20 files changed:
runtimes/neurun/src/backend/ITensorBuilder.h
runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc
runtimes/neurun/src/backend/acl_cl/TensorBuilder.h
runtimes/neurun/src/backend/cpu/TensorBuilder.cc
runtimes/neurun/src/backend/cpu/TensorBuilder.h
runtimes/neurun/src/codegen/BackendResolver.cc
runtimes/neurun/src/codegen/BackendResolver.h
runtimes/neurun/src/codegen/IPlanBuilder.h
runtimes/neurun/src/codegen/PlanBuilder.cc
runtimes/neurun/src/codegen/PlanBuilder.h
runtimes/neurun/src/codegen/Planner.cc
runtimes/neurun/src/codegen/Planner.h
runtimes/neurun/src/compilation.cc
runtimes/neurun/src/graph/Graph.cc
runtimes/neurun/src/graph/operation/LowerInfo.cc
runtimes/neurun/src/graph/operation/LowerInfo.h
runtimes/neurun/src/internal/BackendManager.cc
runtimes/neurun/src/internal/BackendManager.h
runtimes/neurun/src/linear/Linear.cc
runtimes/neurun/src/linear/Linear.h

index e67fe06..8884ae3 100644 (file)
@@ -5,6 +5,7 @@
 #include <arm_compute/core/TensorInfo.h>
 
 #include "graph/operand/Index.h"
+#include "codegen/Plan.h"
 
 namespace neurun
 {
@@ -16,7 +17,8 @@ struct ITensorBuilder
   virtual ~ITensorBuilder(void) = default;
   virtual void mark(const ::neurun::graph::operand::Index &ind) = 0;
   // TODO Add an interface for adding subsumption info
-  virtual void prepare(const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) = 0;
+  virtual void prepare(codegen::Plan &plan,
+                       const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) = 0;
   virtual void allocate(void) = 0;
 };
 
index 87cec49..4ff2efd 100644 (file)
@@ -11,7 +11,7 @@ namespace backend
 namespace acl_cl
 {
 
-TensorBuilder::TensorBuilder(codegen::Plan &plan) : _plan(plan)
+TensorBuilder::TensorBuilder()
 {
   // DO NOTHING
 }
@@ -23,7 +23,8 @@ void TensorBuilder::mark(const ::neurun::graph::operand::Index &ind)
   _inds.insert(ind.asInt());
 }
 
-void TensorBuilder::prepare(const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx)
+void TensorBuilder::prepare(codegen::Plan &plan,
+                            const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx)
 {
   assert(_tensors.size() == 0);
 
@@ -35,7 +36,7 @@ void TensorBuilder::prepare(const std::map<int, ::arm_compute::TensorInfo> &tens
     ::neurun::graph::operand::Index ind{ind_int};
     auto tensor = std::make_shared<::arm_compute::CLTensor>();
     tensor->allocator()->init(tensor_info_ctx.at(ind.asInt()));
-    _plan.operands().set(ind, std::make_shared<operand::Object>(tensor));
+    plan.operands().set(ind, std::make_shared<operand::Object>(tensor));
     _tensors[ind.asInt()] = tensor;
   }
 }
index 4790da7..c593416 100644 (file)
@@ -2,7 +2,6 @@
 #define __NEURUN_BACKEND_ACL_CL_TENSOR_BUILDER_H__
 
 #include "backend/ITensorBuilder.h"
-#include "codegen/Plan.h"
 
 #include <unordered_map>
 #include <unordered_set>
@@ -21,16 +20,16 @@ class Plan;
 class TensorBuilder : public ITensorBuilder
 {
 public:
-  TensorBuilder(codegen::Plan &plan);
+  TensorBuilder();
 
   virtual void mark(const ::neurun::graph::operand::Index &ind) override;
-  virtual void prepare(const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) override;
+  virtual void prepare(codegen::Plan &plan,
+                       const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) override;
   virtual void allocate(void) override;
 
   std::shared_ptr<::arm_compute::CLTensor> at(const ::neurun::graph::operand::Index &ind);
 
 private:
-  codegen::Plan &_plan;
   std::unordered_set<int> _inds;
   std::unordered_map<int, std::shared_ptr<::arm_compute::CLTensor>> _tensors;
 };
index 3b7f446..f25a954 100644 (file)
@@ -11,7 +11,7 @@ namespace backend
 namespace cpu
 {
 
-TensorBuilder::TensorBuilder(codegen::Plan &plan) : _plan(plan)
+TensorBuilder::TensorBuilder()
 {
   // DO NOTHING
 }
@@ -23,7 +23,8 @@ void TensorBuilder::mark(const ::neurun::graph::operand::Index &ind)
   _inds.insert(ind.asInt());
 }
 
-void TensorBuilder::prepare(const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx)
+void TensorBuilder::prepare(codegen::Plan &plan,
+                            const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx)
 {
   assert(_tensors.size() == 0);
 
@@ -33,7 +34,7 @@ void TensorBuilder::prepare(const std::map<int, ::arm_compute::TensorInfo> &tens
     auto tensor = std::make_shared<operand::Tensor>(tensor_info_ctx.at(ind.asInt()));
     // TODO Fix allocation here. When Tensor object is created the memory for tensor is also
     //      allocated, and this must be fixed.
-    _plan.operands().set(ind, std::make_shared<operand::Object>(tensor));
+    plan.operands().set(ind, std::make_shared<operand::Object>(tensor));
     _tensors[ind.asInt()] = tensor;
   }
 }
index 484a3e5..80fa12f 100644 (file)
@@ -6,7 +6,6 @@
 
 #include "backend/ITensorBuilder.h"
 #include "backend/cpu/operand/Tensor.h"
-#include "codegen/Plan.h"
 
 namespace neurun
 {
@@ -20,16 +19,16 @@ class Plan;
 class TensorBuilder : public ITensorBuilder
 {
 public:
-  TensorBuilder(codegen::Plan &plan);
+  TensorBuilder();
 
   virtual void mark(const ::neurun::graph::operand::Index &ind) override;
-  virtual void prepare(const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) override;
+  virtual void prepare(codegen::Plan &plan,
+                       const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) override;
   virtual void allocate(void) override;
 
   std::shared_ptr<operand::Tensor> at(const ::neurun::graph::operand::Index &ind);
 
 private:
-  codegen::Plan &_plan;
   std::unordered_set<int> _inds;
   std::unordered_map<int, std::shared_ptr<operand::Tensor>> _tensors;
 };
index eb7a5a8..17f5dcf 100644 (file)
@@ -5,33 +5,7 @@ namespace neurun
 namespace codegen
 {
 
-std::shared_ptr<neurun::backend::IInitializerGenerator>
-BackendResolver::getInitializerGenerator(const std::type_index &type)
-{
-  return _gen_map.at(type).initializer_gen;
-}
-
-std::shared_ptr<neurun::backend::IStageGenerator>
-BackendResolver::getStageGenerator(const std::type_index &type)
-{
-  return _gen_map.at(type).stage_gen;
-}
-
-std::shared_ptr<neurun::backend::ITensorBuilder>
-BackendResolver::getTensorBuilder(const std::type_index &type)
-{
-  return getStageGenerator(type)->tensor_builder();
-}
-
-std::set<std::shared_ptr<neurun::backend::ITensorBuilder>> BackendResolver::getAllTensorBuilders()
-{
-  std::set<std::shared_ptr<neurun::backend::ITensorBuilder>> ret;
-  for (const auto &it : _gen_map)
-  {
-    ret.insert(it.second.stage_gen->tensor_builder());
-  }
-  return ret;
-}
+// NOT IMPLEMENTED
 
 } // namespace neurun
 } // namespace codegen
index 8023efa..25c854a 100644 (file)
@@ -19,8 +19,10 @@ namespace codegen
 class BackendResolver
 {
 public:
-  BackendResolver(::internal::BackendManager &backend_manager) : _backend_manager(backend_manager)
+  BackendResolver(const neurun::graph::operand::Set &operands)
   {
+    _backend_manager = std::make_shared<::internal::BackendManager>(operands);
+
     const auto &backend_all_str =
         ::nnfw::util::EnvVar{std::string("OP_BACKEND_ALLOPS")}.asString("none");
     if (backend_all_str.compare("none") != 0)
@@ -28,7 +30,7 @@ public:
       VERBOSE(BackendResolver) << "Use backend for all ops: " << backend_all_str << std::endl;
 #define OP(InternalName, NnApiName)                                   \
   {                                                                   \
-    auto backend = _backend_manager.get(backend_all_str);             \
+    auto backend = _backend_manager->get(backend_all_str);            \
     _gen_map[typeid(graph::operation::InternalName::Node)] = backend; \
   }
 #include "graph/operation/Op.lst"
@@ -40,7 +42,7 @@ public:
   {                                                                                               \
     const auto &backend_str =                                                                     \
         ::nnfw::util::EnvVar{std::string("OP_BACKEND_") + #NnApiName}.asString("acl_cl");         \
-    auto backend = _backend_manager.get(backend_str);                                             \
+    auto backend = _backend_manager->get(backend_str);                                            \
     VERBOSE(BackendResolver) << "backend for " << #NnApiName << ": " << backend_str << std::endl; \
     _gen_map[typeid(graph::operation::InternalName::Node)] = backend;                             \
   }
@@ -50,15 +52,12 @@ public:
     }
   }
 
-  std::shared_ptr<neurun::backend::IInitializerGenerator>
-  getInitializerGenerator(const std::type_index &type);
-  std::shared_ptr<neurun::backend::IStageGenerator> getStageGenerator(const std::type_index &type);
-  std::shared_ptr<neurun::backend::ITensorBuilder> getTensorBuilder(const std::type_index &type);
-  std::set<std::shared_ptr<neurun::backend::ITensorBuilder>> getAllTensorBuilders();
+public:
+  const ::internal::Backend &getBackend(const std::type_index &type) { return _gen_map[type]; }
 
 private:
   std::unordered_map<std::type_index, ::internal::Backend> _gen_map;
-  ::internal::BackendManager &_backend_manager;
+  std::shared_ptr<::internal::BackendManager> _backend_manager;
 };
 
 } // namespace codegen
index fc9cc36..1a6dd9d 100644 (file)
@@ -16,6 +16,7 @@ struct IPlanBuilder
 
   virtual void addShapeConstr(const ::neurun::graph::operand::Index &ind,
                               const ::arm_compute::TensorInfo &info) = 0;
+  virtual void addTensorBuilder(std::shared_ptr<backend::ITensorBuilder> tensor_builder) = 0;
   virtual void addInitializer(const ::neurun::graph::operand::Index &ind,
                               const Initializer &initializer) = 0;
   virtual void addStage(const Stage &) = 0;
index 5471218..cba9046 100644 (file)
@@ -11,6 +11,11 @@ void PlanBuilder::addShapeConstr(const ::neurun::graph::operand::Index &ind,
   _tensor_info_ctx[ind.asInt()] = info;
 }
 
+void PlanBuilder::addTensorBuilder(std::shared_ptr<backend::ITensorBuilder> tensor_builder)
+{
+  _tensor_builders.insert(tensor_builder);
+}
+
 void PlanBuilder::addInitializer(const ::neurun::graph::operand::Index &ind,
                                  const Initializer &initializer)
 {
@@ -19,14 +24,12 @@ void PlanBuilder::addInitializer(const ::neurun::graph::operand::Index &ind,
 
 void PlanBuilder::addStage(const Stage &stage) { _stages.emplace_back(stage); }
 
-void PlanBuilder::finalize(BackendResolver &backend_resolver)
+void PlanBuilder::finalize()
 {
-  auto tensor_builders = backend_resolver.getAllTensorBuilders();
-
   // Prepare tensors
-  for (auto &tensor_builder : tensor_builders)
+  for (auto &tensor_builder : _tensor_builders)
   {
-    tensor_builder->prepare(_tensor_info_ctx);
+    tensor_builder->prepare(_plan, _tensor_info_ctx);
   }
 
   // Process Stage
@@ -39,7 +42,7 @@ void PlanBuilder::finalize(BackendResolver &backend_resolver)
 
   // TODO Add code for CPU/ACL tensor allocation
   // Allocate Tensor Memory for cl_tensors
-  for (auto &tensor_builder : tensor_builders)
+  for (auto &tensor_builder : _tensor_builders)
   {
     tensor_builder->allocate();
   }
index 2f848e2..d074abf 100644 (file)
@@ -5,6 +5,7 @@
 #include "codegen/Plan.h"
 #include "codegen/BackendResolver.h"
 #include "backend/IStageGenerator.h"
+#include "backend/ITensorBuilder.h"
 
 namespace neurun
 {
@@ -42,6 +43,9 @@ public:
                       const ::arm_compute::TensorInfo &info) override;
 
 public:
+  void addTensorBuilder(std::shared_ptr<backend::ITensorBuilder> tensor_builder) override;
+
+public:
   void addInitializer(const ::neurun::graph::operand::Index &ind,
                       const Initializer &initializer) override;
 
@@ -49,7 +53,7 @@ public:
   void addStage(const Stage &stage) override;
 
 public:
-  void finalize(BackendResolver &backend_resolver);
+  void finalize();
 
 public:
   const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx() { return _tensor_info_ctx; }
@@ -60,6 +64,7 @@ private:
 private:
   std::map<int, ::arm_compute::TensorInfo> _tensor_info_ctx;
   std::map<int, Initializer> _initializer_ctx;
+  std::set<std::shared_ptr<backend::ITensorBuilder>> _tensor_builders;
   std::vector<Stage> _stages;
 };
 
index 53facf4..8d0bf3a 100644 (file)
@@ -6,6 +6,7 @@
 #include "graph/operand/Set.h"
 #include "codegen/IPlanBuilder.h"
 #include "codegen/BackendResolver.h"
+#include "graph/operation/LowerInfo.h"
 
 namespace neurun
 {
@@ -31,14 +32,21 @@ void Planner::visit(const graph::operation::Conv2D::Implicit::Node &node)
   _builder.addShapeConstr(ker_index, ::internal::asTensorInfo(ker_shape));
   _builder.addShapeConstr(bias_index, ::internal::asTensorInfo(bias_size));
 
+  // backend
+  auto backend = node.lower_info()->backend();
+
   // Generate Initializers
-  auto init_gen = _backend_resolver.getInitializerGenerator(typeid(node));
+  auto init_gen = backend.initializer_gen();
   _builder.addInitializer(ker_index, init_gen->generateWeight(node));
   _builder.addInitializer(bias_index, init_gen->generateBias(node));
 
   // Generate Stage
-  auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+  auto stage_gen = backend.stage_gen();
   _builder.addStage(stage_gen->generate(node));
+
+  // Generate TensorBuilder
+  auto tensor_builder = backend.tensor_builder();
+  _builder.addTensorBuilder(tensor_builder);
 }
 
 void Planner::visit(const graph::operation::MaxPool2D::Implicit::Node &node)
@@ -53,9 +61,16 @@ void Planner::visit(const graph::operation::MaxPool2D::Implicit::Node &node)
   _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
   _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
 
+  // backend
+  auto backend = node.lower_info()->backend();
+
   // Generate Stage
-  auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+  auto stage_gen = backend.stage_gen();
   _builder.addStage(stage_gen->generate(node));
+
+  // Generate TensorBuilder
+  auto tensor_builder = backend.tensor_builder();
+  _builder.addTensorBuilder(tensor_builder);
 }
 
 void Planner::visit(const graph::operation::AvgPool2D::Implicit::Node &node)
@@ -70,9 +85,16 @@ void Planner::visit(const graph::operation::AvgPool2D::Implicit::Node &node)
   _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
   _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
 
+  // backend
+  auto backend = node.lower_info()->backend();
+
   // Generate Stage
-  auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+  auto stage_gen = backend.stage_gen();
   _builder.addStage(stage_gen->generate(node));
+
+  // Generate TensorBuilder
+  auto tensor_builder = backend.tensor_builder();
+  _builder.addTensorBuilder(tensor_builder);
 }
 
 void Planner::visit(const graph::operation::Concat::Node &node)
@@ -99,9 +121,16 @@ void Planner::visit(const graph::operation::Concat::Node &node)
     _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
   }
 
+  // backend
+  auto backend = node.lower_info()->backend();
+
   // Generate Stage
-  auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+  auto stage_gen = backend.stage_gen();
   _builder.addStage(stage_gen->generate(node));
+
+  // Generate TensorBuilder
+  auto tensor_builder = backend.tensor_builder();
+  _builder.addTensorBuilder(tensor_builder);
 }
 
 void Planner::visit(const graph::operation::FullyConnected::Node &node)
@@ -137,14 +166,21 @@ void Planner::visit(const graph::operation::FullyConnected::Node &node)
                           ::internal::asTensorInfo(num_output /*H*/, input_size /*W*/));
   _builder.addShapeConstr(bias_index, ::internal::asTensorInfo(bias_size));
 
+  // backend
+  auto backend = node.lower_info()->backend();
+
   // Generate Initializers
-  auto init_gen = _backend_resolver.getInitializerGenerator(typeid(node));
+  auto init_gen = backend.initializer_gen();
   _builder.addInitializer(weight_index, init_gen->generateWeight(node));
   _builder.addInitializer(bias_index, init_gen->generateBias(node));
 
   // Generate Stage
-  auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+  auto stage_gen = backend.stage_gen();
   _builder.addStage(stage_gen->generate(node));
+
+  // Generate TensorBuilder
+  auto tensor_builder = backend.tensor_builder();
+  _builder.addTensorBuilder(tensor_builder);
 }
 
 void Planner::visit(const graph::operation::Reshape::Node &node)
@@ -172,9 +208,16 @@ void Planner::visit(const graph::operation::Reshape::Node &node)
   _builder.addShapeConstr(output_index, ::internal::asTensorInfo(out_size));
   _builder.addShapeConstr(input_index, ::internal::asTensorInfo(ifm_shape));
 
+  // backend
+  auto backend = node.lower_info()->backend();
+
   // Generate Stage
-  auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+  auto stage_gen = backend.stage_gen();
   _builder.addStage(stage_gen->generate(node));
+
+  // Generate TensorBuilder
+  auto tensor_builder = backend.tensor_builder();
+  _builder.addTensorBuilder(tensor_builder);
 }
 
 void Planner::visit(const graph::operation::Softmax::Node &node)
@@ -197,9 +240,16 @@ void Planner::visit(const graph::operation::Softmax::Node &node)
   _builder.addShapeConstr(output_index, ::internal::asTensorInfo(len));
   _builder.addShapeConstr(input_index, ::internal::asTensorInfo(len));
 
+  // backend
+  auto backend = node.lower_info()->backend();
+
   // Generate Stage
-  auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+  auto stage_gen = backend.stage_gen();
   _builder.addStage(stage_gen->generate(node));
+
+  // Generate TensorBuilder
+  auto tensor_builder = backend.tensor_builder();
+  _builder.addTensorBuilder(tensor_builder);
 }
 
 void Planner::visit(const graph::operation::NOP::Node & /* node */)
index cad2f2e..05e4f44 100644 (file)
@@ -25,9 +25,8 @@ class BackendResolver;
 class Planner : public graph::operation::NodeVisitor
 {
 public:
-  Planner(const neurun::graph::operand::Set &ctx, neurun::codegen::IPlanBuilder &builder,
-          neurun::codegen::BackendResolver &backend_resolver)
-      : _ctx{ctx}, _builder{builder}, _backend_resolver(backend_resolver)
+  Planner(const neurun::graph::operand::Set &ctx, neurun::codegen::IPlanBuilder &builder)
+      : _ctx{ctx}, _builder{builder}
   {
   }
 
@@ -44,7 +43,6 @@ public:
 private:
   const neurun::graph::operand::Set &_ctx;
   neurun::codegen::IPlanBuilder &_builder;
-  neurun::codegen::BackendResolver &_backend_resolver;
 };
 
 } // namespace codegen
index 7e9bff6..1e1cf3f 100644 (file)
@@ -38,16 +38,14 @@ int ANeuralNetworksCompilation::finish()
   // Dump ops
   linear->accept(neurun::codegen::Dumper{});
 
-  ::internal::BackendManager backend_manager{plan};
-  neurun::codegen::BackendResolver backend_resolver{backend_manager};
   neurun::codegen::PlanBuilder plan_builder{plan};
 
-  linear->markTensors(backend_resolver);
+  linear->markTensors();
 
-  linear->accept(neurun::codegen::Planner{operands, plan_builder, backend_resolver});
+  linear->accept(neurun::codegen::Planner{operands, plan_builder});
 
   // TODO Add optimization passes
-  plan_builder.finalize(backend_resolver);
+  plan_builder.finalize();
 
   return ANEURALNETWORKS_NO_ERROR;
 }
index ef97c84..531ef6d 100644 (file)
@@ -8,6 +8,7 @@
 #include "nnfw/std/memory.h"
 #include "linear/Linear.h"
 #include "operation/LowerInfo.h"
+#include "codegen/BackendResolver.h"
 
 namespace neurun
 {
@@ -110,9 +111,10 @@ void Graph::lower(void)
 
   // Lower
   {
+    auto _backend_resolver = neurun::codegen::BackendResolver(_operands);
     _operations.iterate([&](const operation::Index &, operation::Node &node) {
-      // TODO Update backend id accordingly. Currently "acl_cl" by default
-      node.lower_info(nnfw::make_unique<operation::LowerInfo>(std::string("acl_cl")));
+      auto backend = _backend_resolver.getBackend(typeid(node));
+      node.lower_info(nnfw::make_unique<operation::LowerInfo>(backend));
     });
   }
 
index c59bd1c..e8c19c8 100644 (file)
@@ -7,7 +7,7 @@ namespace graph
 namespace operation
 {
 
-LowerInfo::LowerInfo(const std::string &backend_id) : _backend_id(backend_id)
+LowerInfo::LowerInfo(const internal::Backend &backend) : _backend(backend)
 {
   // DO NOTHING
 }
index c524b00..56d1aa1 100644 (file)
@@ -3,6 +3,8 @@
 
 #include <string>
 
+#include "internal/BackendManager.h"
+
 namespace neurun
 {
 namespace graph
@@ -13,11 +15,11 @@ namespace operation
 class LowerInfo
 {
 public:
-  LowerInfo(const std::string &backend_id);
-  const std::string &backend_id() const { return _backend_id; }
+  LowerInfo(const internal::Backend &backend);
+  const internal::Backend &backend() const { return _backend; }
 
 private:
-  const std::string _backend_id;
+  internal::Backend _backend;
 };
 
 } // namespace operation
index 292b751..e2781ce 100644 (file)
@@ -15,20 +15,33 @@ namespace internal
 Backend::Backend(const std::shared_ptr<neurun::backend::IBackendInitializer> &backend_initializer,
                  const std::shared_ptr<neurun::backend::IInitializerGenerator> &initializer_gen,
                  const std::shared_ptr<neurun::backend::IStageGenerator> &stage_gen)
-    : initializer_gen(initializer_gen), stage_gen(stage_gen)
+    : _initializer_gen(initializer_gen), _stage_gen(stage_gen)
 {
   backend_initializer->initialize();
 }
 
-BackendManager::BackendManager(neurun::codegen::Plan &plan) : _plan(plan)
+const std::shared_ptr<neurun::backend::IInitializerGenerator> Backend::initializer_gen() const
 {
-  const auto &operands = _plan.model().operands();
+  return _initializer_gen;
+}
+
+const std::shared_ptr<neurun::backend::IStageGenerator> Backend::stage_gen() const
+{
+  return _stage_gen;
+}
 
+const std::shared_ptr<neurun::backend::ITensorBuilder> Backend::tensor_builder() const
+{
+  return _stage_gen->tensor_builder();
+}
+
+BackendManager::BackendManager(const neurun::graph::operand::Set &operands)
+{
   // Add arm_compute backend
   {
     using namespace ::neurun::backend::acl_cl;
     auto acl_backend_initializer = std::make_shared<BackendInitializer>();
-    auto acl_tensor_builder = std::make_shared<TensorBuilder>(_plan);
+    auto acl_tensor_builder = std::make_shared<TensorBuilder>();
     auto acl_initializer_gen = std::make_shared<InitializerGenerator>(operands);
     auto acl_stage_gen = std::make_shared<StageGenerator>(operands, acl_tensor_builder);
 
@@ -40,7 +53,7 @@ BackendManager::BackendManager(neurun::codegen::Plan &plan) : _plan(plan)
   {
     using namespace ::neurun::backend::cpu;
     auto cpu_backend_initializer = std::make_shared<BackendInitializer>();
-    auto cpu_tensor_builder = std::make_shared<TensorBuilder>(_plan);
+    auto cpu_tensor_builder = std::make_shared<TensorBuilder>();
     auto cpu_initializer_gen = std::make_shared<InitializerGenerator>(operands);
     auto cpu_stage_gen = std::make_shared<StageGenerator>(operands, cpu_tensor_builder);
 
index a5f51db..1585cce 100644 (file)
@@ -3,7 +3,7 @@
 
 #include <memory>
 
-#include "codegen/Plan.h"
+#include "graph/operand/Set.h"
 
 namespace neurun
 {
@@ -21,30 +21,36 @@ struct ITensorBuilder;
 namespace internal
 {
 
-struct Backend
+class Backend
 {
-  std::shared_ptr<neurun::backend::IInitializerGenerator> initializer_gen;
-  std::shared_ptr<neurun::backend::IStageGenerator> stage_gen;
-
+public:
   Backend(const std::shared_ptr<neurun::backend::IBackendInitializer> &backend_initializer,
           const std::shared_ptr<neurun::backend::IInitializerGenerator> &initializer_gen,
           const std::shared_ptr<neurun::backend::IStageGenerator> &stage_gen);
 
-  Backend(void) : initializer_gen(nullptr), stage_gen(nullptr)
+  Backend(void) : _initializer_gen(nullptr), _stage_gen(nullptr)
   {
     // DO NOTHING
   }
+
+public:
+  const std::shared_ptr<neurun::backend::IInitializerGenerator> initializer_gen() const;
+  const std::shared_ptr<neurun::backend::IStageGenerator> stage_gen() const;
+  const std::shared_ptr<neurun::backend::ITensorBuilder> tensor_builder() const;
+
+private:
+  std::shared_ptr<neurun::backend::IInitializerGenerator> _initializer_gen;
+  std::shared_ptr<neurun::backend::IStageGenerator> _stage_gen;
 };
 
 class BackendManager
 {
 public:
-  BackendManager(neurun::codegen::Plan &plan);
+  BackendManager(const neurun::graph::operand::Set &operands);
 
   Backend get(const std::string &key);
 
 private:
-  neurun::codegen::Plan &_plan;
   std::map<std::string, Backend> _gen_map;
 };
 
index a07ceda..e898b73 100644 (file)
@@ -3,6 +3,7 @@
 #include "graph/Graph.h"
 
 #include "codegen/BackendResolver.h"
+#include "graph/operation/LowerInfo.h"
 
 namespace neurun
 {
@@ -32,11 +33,11 @@ void Linear::accept(graph::operation::NodeVisitor &&visitor) const
   }
 }
 
-void Linear::markTensors(neurun::codegen::BackendResolver &resolver) const
+void Linear::markTensors() const
 {
   for (const auto op : _operations)
   {
-    auto tensor_builder = resolver.getTensorBuilder(typeid(*op));
+    const auto tensor_builder = op->lower_info()->backend().stage_gen()->tensor_builder();
     for (const auto &ind : op->getInputs())
     {
       tensor_builder->mark(ind);
index a8a9a3e..574a29b 100644 (file)
@@ -49,7 +49,7 @@ public:
   void accept(graph::operation::NodeVisitor &&visitor) const;
 
   // TODO Remove this since tensor marking will be replaced with another way
-  virtual void markTensors(neurun::codegen::BackendResolver &) const;
+  virtual void markTensors() const;
 
 public:
 private: