[neurun] Extract BackendResolver into a separate file (#2273)
author이한종/동작제어Lab(SR)/Engineer/삼성전자 <hanjoung.lee@samsung.com>
Mon, 13 Aug 2018 09:17:07 +0000 (18:17 +0900)
committer오형석/동작제어Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Mon, 13 Aug 2018 09:17:07 +0000 (18:17 +0900)
Extract BackendResolver into a separate file and move it to the codegen
directory.

Signed-off-by: Hanjoung Lee <hanjoung.lee@samsung.com>
include/util/EnvVar.h
runtimes/neurun/CMakeLists.txt
runtimes/neurun/src/codegen/BackendResolver.cc [new file with mode: 0644]
runtimes/neurun/src/codegen/BackendResolver.h [new file with mode: 0644]
runtimes/neurun/src/compilation.cc

index ec18678..5512fed 100644 (file)
@@ -17,6 +17,7 @@
 #ifndef __NNFW_UTIL_ENV_VAR__
 #define __NNFW_UTIL_ENV_VAR__
 
+#include <algorithm>
 #include <array>
 #include <cstdlib>
 #include <string>
index 2c9b631..6ee319c 100644 (file)
@@ -16,8 +16,9 @@ file(GLOB SOURCES "src/*.cc")
 file(GLOB SOURCES_FRONTEND "src/frontend/*.cc")
 file(GLOB_RECURSE SOURCES_INTERNAL "src/internal/*.cc")
 file(GLOB_RECURSE SOURCES_GRAPH "src/graph/*.cc")
+file(GLOB_RECURSE SOURCES_CODEGEN "src/codegen/*.cc")
 
-set(SOURCES ${SOURCES} ${SOURCES_FRONTEND} ${SOURCES_INTERNAL} ${SOURCES_GRAPH})
+set(SOURCES ${SOURCES} ${SOURCES_FRONTEND} ${SOURCES_INTERNAL} ${SOURCES_GRAPH} ${SOURCES_CODEGEN})
 
 # NOTE For now ARMCompute is necessary
 # TODO Remove required package below(should be optional)
diff --git a/runtimes/neurun/src/codegen/BackendResolver.cc b/runtimes/neurun/src/codegen/BackendResolver.cc
new file mode 100644 (file)
index 0000000..d5e1d15
--- /dev/null
@@ -0,0 +1,42 @@
+#include "BackendResolver.h"
+
+namespace neurun
+{
+namespace codegen
+{
+
+std::shared_ptr<neurun::backend::IInitializerGenerator>
+BackendResolver::getInitializerGenerator(const std::type_index &type)
+{
+  return _gen_map.at(type).initializer_gen;
+}
+
+std::shared_ptr<neurun::backend::IStageGenerator>
+BackendResolver::getStageGenerator(const std::type_index &type)
+{
+  return _gen_map.at(type).stage_gen;
+}
+
+std::shared_ptr<neurun::backend::ITensorBuilder>
+BackendResolver::getTensorBuilder(const std::type_index &type)
+{
+  return getStageGenerator(type)->tensor_builder();
+}
+
+std::set<std::shared_ptr<neurun::backend::ITensorBuilder>> BackendResolver::getAllTensorBuilders()
+{
+  std::set<std::shared_ptr<neurun::backend::ITensorBuilder>> ret;
+  for (const auto &it : _gen_map)
+  {
+    ret.insert(it.second.stage_gen->tensor_builder());
+  }
+  return ret;
+}
+
+std::shared_ptr<::internal::common::TensorBuilder> BackendResolver::getCommonTensorBuilder()
+{
+  return _backend_manager.getCommonTensorBuilder();
+}
+
+} // namespace codegen
+} // namespace neurun
diff --git a/runtimes/neurun/src/codegen/BackendResolver.h b/runtimes/neurun/src/codegen/BackendResolver.h
new file mode 100644 (file)
index 0000000..2adccda
--- /dev/null
@@ -0,0 +1,78 @@
+#ifndef __NEURUN_CODEGEN_BACKEND_RESOLVER_H__
+#define __NEURUN_CODEGEN_BACKEND_RESOLVER_H__
+
+#include <set>
+#include <typeindex>
+
+#include "logging.h"
+#include "util/EnvVar.h"
+#include "internal/BackendManager.h"
+#include "backend/IInitializerGenerator.h"
+#include "backend/IStageGenerator.h"
+#include "internal/common/TensorBuilder.h"
+
+namespace neurun
+{
+namespace codegen
+{
+
+class BackendResolver
+{
+public:
+  BackendResolver(::internal::BackendManager &backend_manager) : _backend_manager(backend_manager)
+  {
+    const auto &backend_all_str =
+        ::nnfw::util::EnvVar{std::string("OP_BACKEND_ALLOPS")}.asString("none");
+    if (backend_all_str.compare("none") != 0)
+    {
+      VERBOSE(BackendResolver) << "Use backend for all ops: " << backend_all_str << std::endl;
+#define OP(InternalName, NnApiName)                                         \
+  {                                                                         \
+    auto backend = _backend_manager.get(backend_all_str);                   \
+    _gen_map[typeid(::internal::tflite::op::InternalName::Node)] = backend; \
+  }
+#include "internal/op/Op.lst"
+#undef OP
+    }
+    else
+    {
+#define OP(InternalName, NnApiName)                                                               \
+  {                                                                                               \
+    const auto &backend_str =                                                                     \
+        ::nnfw::util::EnvVar{std::string("OP_BACKEND_") + #NnApiName}.asString("acl_cl");         \
+    auto backend = _backend_manager.get(backend_str);                                             \
+    VERBOSE(BackendResolver) << "backend for " << #NnApiName << ": " << backend_str << std::endl; \
+    _gen_map[typeid(::internal::tflite::op::InternalName::Node)] = backend;                       \
+  }
+
+#include "internal/op/Op.lst"
+#undef OP
+    }
+
+    // TODO: It's just a workaround. Its logic should be changed.
+    _gen_map[typeid(::internal::tflite::op::TensorConvert::CpuFromCommon::Node)] =
+        backend_manager.get("cpu");
+    _gen_map[typeid(::internal::tflite::op::TensorConvert::CpuToCommon::Node)] =
+        backend_manager.get("cpu");
+    _gen_map[typeid(::internal::tflite::op::TensorConvert::AclFromCommon::Node)] =
+        backend_manager.get("acl_cl");
+    _gen_map[typeid(::internal::tflite::op::TensorConvert::AclToCommon::Node)] =
+        backend_manager.get("acl_cl");
+  }
+
+  std::shared_ptr<neurun::backend::IInitializerGenerator>
+  getInitializerGenerator(const std::type_index &type);
+  std::shared_ptr<neurun::backend::IStageGenerator> getStageGenerator(const std::type_index &type);
+  std::shared_ptr<neurun::backend::ITensorBuilder> getTensorBuilder(const std::type_index &type);
+  std::set<std::shared_ptr<neurun::backend::ITensorBuilder>> getAllTensorBuilders();
+  std::shared_ptr<::internal::common::TensorBuilder> getCommonTensorBuilder();
+
+private:
+  std::unordered_map<std::type_index, ::internal::Backend> _gen_map;
+  ::internal::BackendManager &_backend_manager;
+};
+
+} // namespace codegen
+} // namespace neurun
+
+#endif // __NEURUN_CODEGEN_BACKEND_RESOLVER_H__
index 9e588f6..648d277 100644 (file)
@@ -1,7 +1,6 @@
 #include <NeuralNetworks.h>
 
 #include <algorithm>
-#include <typeindex>
 
 #include <arm_compute/core/CL/ICLTensor.h>
 
@@ -17,7 +16,6 @@
 #include "backend/IStageGenerator.h"
 #include "internal/common/Tensor.h"
 #include "internal/common/TensorBuilder.h"
-#include "util/EnvVar.h"
 
 #include "compilation.h"
 #include "model.h"
 using TensorSetter = std::function<void(int, const ::arm_compute::TensorInfo &)>;
 
 #include "codegen/IPlanBuilder.h"
-#include "internal/BackendManager.h"
-
-class BackendResolver
-{
-public:
-  BackendResolver(::internal::BackendManager &backend_manager) : _backend_manager(backend_manager)
-  {
-    const auto &backend_all_str =
-        ::nnfw::util::EnvVar{std::string("OP_BACKEND_ALLOPS")}.asString("none");
-    if (backend_all_str.compare("none") != 0)
-    {
-      VERBOSE(BackendResolver) << "Use backend for all ops: " << backend_all_str << std::endl;
-#define OP(InternalName, NnApiName)                                         \
-  {                                                                         \
-    auto backend = _backend_manager.get(backend_all_str);                   \
-    _gen_map[typeid(::internal::tflite::op::InternalName::Node)] = backend; \
-  }
-#include "internal/op/Op.lst"
-#undef OP
-    }
-    else
-    {
-#define OP(InternalName, NnApiName)                                                               \
-  {                                                                                               \
-    const auto &backend_str =                                                                     \
-        ::nnfw::util::EnvVar{std::string("OP_BACKEND_") + #NnApiName}.asString("acl_cl");         \
-    auto backend = _backend_manager.get(backend_str);                                             \
-    VERBOSE(BackendResolver) << "backend for " << #NnApiName << ": " << backend_str << std::endl; \
-    _gen_map[typeid(::internal::tflite::op::InternalName::Node)] = backend;                       \
-  }
-
-#include "internal/op/Op.lst"
-#undef OP
-    }
-
-    // TODO : It's just workaround. It's logic should be changed.
-    _gen_map[typeid(::internal::tflite::op::TensorConvert::CpuFromCommon::Node)] =
-        backend_manager.get("cpu");
-    _gen_map[typeid(::internal::tflite::op::TensorConvert::CpuToCommon::Node)] =
-        backend_manager.get("cpu");
-    _gen_map[typeid(::internal::tflite::op::TensorConvert::AclFromCommon::Node)] =
-        backend_manager.get("acl_cl");
-    _gen_map[typeid(::internal::tflite::op::TensorConvert::AclToCommon::Node)] =
-        backend_manager.get("acl_cl");
-  }
-
-  std::shared_ptr<neurun::backend::IInitializerGenerator>
-  getInitializerGenerator(const std::type_index &type);
-  std::shared_ptr<neurun::backend::IStageGenerator> getStageGenerator(const std::type_index &type);
-  std::shared_ptr<neurun::backend::ITensorBuilder> getTensorBuilder(const std::type_index &type);
-  std::set<std::shared_ptr<neurun::backend::ITensorBuilder>> getAllTensorBuilders();
-  std::shared_ptr<::internal::common::TensorBuilder> getCommonTensorBuilder();
-
-private:
-  std::unordered_map<std::type_index, ::internal::Backend> _gen_map;
-  ::internal::BackendManager &_backend_manager;
-};
-
-std::shared_ptr<neurun::backend::IInitializerGenerator>
-BackendResolver::getInitializerGenerator(const std::type_index &type)
-{
-  return _gen_map.at(type).initializer_gen;
-}
-
-std::shared_ptr<neurun::backend::IStageGenerator>
-BackendResolver::getStageGenerator(const std::type_index &type)
-{
-  return _gen_map.at(type).stage_gen;
-}
-
-std::shared_ptr<neurun::backend::ITensorBuilder>
-BackendResolver::getTensorBuilder(const std::type_index &type)
-{
-  return getStageGenerator(type)->tensor_builder();
-}
-
-std::set<std::shared_ptr<neurun::backend::ITensorBuilder>> BackendResolver::getAllTensorBuilders()
-{
-  std::set<std::shared_ptr<neurun::backend::ITensorBuilder>> ret;
-  for (const auto &it : _gen_map)
-  {
-    ret.insert(it.second.stage_gen->tensor_builder());
-  }
-  return ret;
-}
-
-std::shared_ptr<::internal::common::TensorBuilder> BackendResolver::getCommonTensorBuilder()
-{
-  return _backend_manager.getCommonTensorBuilder();
-}
+#include "codegen/BackendResolver.h"
 
 class Planner : public ::internal::tflite::op::NodeVisitor
 {
 public:
   Planner(const neurun::graph::operand::Set &ctx, neurun::codegen::IPlanBuilder &builder,
-          BackendResolver &backend_resolver)
+          neurun::codegen::BackendResolver &backend_resolver)
       : _ctx{ctx}, _builder{builder}, _backend_resolver(backend_resolver)
   {
   }
@@ -179,7 +88,7 @@ public:
 private:
   const neurun::graph::operand::Set &_ctx;
   neurun::codegen::IPlanBuilder &_builder;
-  BackendResolver &_backend_resolver;
+  neurun::codegen::BackendResolver &_backend_resolver;
 };
 
 void Planner::visit(const ::internal::tflite::op::Conv2D::implicit::Node &node)
@@ -567,7 +476,7 @@ public:
   void addStage(const Stage &stage) override;
 
 public:
-  void finalize(BackendResolver &backend_resolver);
+  void finalize(neurun::codegen::BackendResolver &backend_resolver);
 
 public:
   const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx() { return _tensor_info_ctx; }
@@ -595,7 +504,7 @@ void PlanBuilder::addInitializer(const ::internal::tflite::operand::Index &ind,
 
 void PlanBuilder::addStage(const Stage &stage) { _stages.emplace_back(stage); }
 
-void PlanBuilder::finalize(BackendResolver &backend_resolver)
+void PlanBuilder::finalize(neurun::codegen::BackendResolver &backend_resolver)
 {
   auto tensor_builders = backend_resolver.getAllTensorBuilders();
   auto common_tensor_builder = backend_resolver.getCommonTensorBuilder();
@@ -658,7 +567,7 @@ int ANeuralNetworksCompilation::finish()
   }
 
   ::internal::BackendManager backend_manager{plan};
-  BackendResolver backend_resolver{backend_manager};
+  neurun::codegen::BackendResolver backend_resolver{backend_manager};
   PlanBuilder plan_builder{plan};
 
   for (uint32_t n = 0; n < operations.size(); ++n)