[neurun] Do backends as shared and update BackendManager to support plugin (#3793)
author Дилшоджон Умронхонович Пошшоев/AI Tools Lab /SRR/Engineer/삼성전자 <d.poshshoev@samsung.com>
Thu, 6 Dec 2018 06:06:23 +0000 (09:06 +0300)
committer 오형석/동작제어Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Thu, 6 Dec 2018 06:06:23 +0000 (15:06 +0900)
Related issue: #3703
This is the initial step of converting backends to plugins
Update BackendManager to create a map using plugins

Signed-off-by: Poshshoev Dilshodzhon <d.poshshoev@samsung.com>
runtimes/neurun/CMakeLists.txt
runtimes/neurun/src/backend/BackendManager.cc
runtimes/neurun/src/backend/BackendManager.h
runtimes/neurun/src/backend/acl_cl/CMakeLists.txt
runtimes/neurun/src/backend/acl_cl/PluginClassesAllocator.cc [new file with mode: 0644]
runtimes/neurun/src/backend/cpu/CMakeLists.txt
runtimes/neurun/src/backend/cpu/PluginClassesAllocator.cc [new file with mode: 0644]
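
For context: the change below follows the standard dlopen/dlsym plugin pattern. Each backend is now built as a shared library that exports extern "C" factory functions, and BackendManager resolves those symbols at runtime instead of linking the backend classes directly. The snippet below is a minimal standalone sketch of that pattern, not the neurun code itself; the plugin name libbackend_example.so and the reduced IConfig interface are illustrative assumptions (the commit itself loads libbackend_acl_cl.so and libbackend_cpu.so and uses the real neurun interfaces).

// Minimal standalone sketch of the dlopen/dlsym factory-loading pattern.
// Names here (libbackend_example.so, IConfig) are illustrative only.
#include <dlfcn.h>
#include <cstdio>
#include <cstdlib>
#include <memory>
#include <string>
#include <utility>

// Resolve an extern "C" factory symbol from an already-opened plugin and call it.
template <typename T, typename... Args>
std::shared_ptr<T> loadFromPlugin(void *handle, const std::string &func_name, Args &&... args)
{
  using Creator = T *(*)(Args...);
  auto creator = reinterpret_cast<Creator>(dlsym(handle, func_name.c_str()));
  if (creator == nullptr)
  {
    std::fprintf(stderr, "cannot resolve %s: %s\n", func_name.c_str(), dlerror());
    std::abort();
  }
  return std::shared_ptr<T>(creator(std::forward<Args>(args)...));
}

// Simplified stand-in for neurun::backend::IConfig.
struct IConfig
{
  virtual ~IConfig() = default;
  virtual std::string id() const = 0;
};

int main()
{
  // Hypothetical plugin name; the commit itself opens libbackend_acl_cl.so and libbackend_cpu.so.
  void *handle = dlopen("libbackend_example.so", RTLD_LAZY | RTLD_LOCAL);
  if (handle == nullptr)
  {
    std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
    return 1;
  }
  auto config = loadFromPlugin<IConfig>(handle, "allocate_Config");
  std::printf("loaded backend: %s\n", config->id().c_str());
  // The handle is intentionally never dlclose'd so objects created by the plugin stay valid,
  // matching what BackendManager does.
  return 0;
}

Build the sketch with -ldl; as noted in the comments, the plugin handle is kept open for the lifetime of the objects it created.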

index 4407f38..08237b6 100644 (file)
@@ -38,7 +38,7 @@ target_link_libraries(${LIB_NEURUN} tensorflow-lite)
 target_link_libraries(${LIB_NEURUN} nnfw_util)
 target_link_libraries(${LIB_NEURUN} nnfw_support_nnapi)
 
-# TODO This should be optional
+# TODO This will be removed when backends are converted to plugins
 target_link_libraries(${LIB_NEURUN} ${LIB_NEURUN_BACKEND_CPU})
 target_link_libraries(${LIB_NEURUN} ${LIB_NEURUN_BACKEND_ACL_CL})
 
index 0dbb14c..8b31af4 100644 (file)
  * limitations under the License.
  */
 
+#include <dlfcn.h>
 #include "BackendManager.h"
 
-#include "backend/acl_cl/Config.h"
-#include "backend/acl_cl/TensorBuilder.h"
-#include "backend/acl_cl/StageGenerator.h"
-#include "backend/cpu/Config.h"
-#include "backend/cpu/TensorBuilder.h"
-#include "backend/cpu/StageGenerator.h"
+#include "backend/interface/IConfig.h"
+#include "backend/interface/ITensorBuilder.h"
+#include "backend/interface/IStageGenerator.h"
 
 namespace neurun
 {
@@ -47,25 +45,48 @@ const std::shared_ptr<neurun::backend::ITensorBuilder> Backend::tensor_builder()
   return _stage_gen->tensor_builder();
 }
 
-BackendManager::BackendManager(const neurun::graph::operand::Set &operands)
+template <typename T, class... Types>
+void BackendManager::loadObjectFromPlugin(std::shared_ptr<T> &object_of_plugin_class,
+                                          const std::string obj_creator_func_name, void *handle,
+                                          Types &&... args)
 {
-  // Add arm_compute backend
+  T *(*allocate_obj)(Types && ... Args);
+  // load object creator function
+  allocate_obj = (T * (*)(Types && ... Args))dlsym(handle, obj_creator_func_name.c_str());
+  if (allocate_obj == nullptr)
   {
-    using namespace ::neurun::backend::acl_cl;
-    auto config = std::make_shared<Config>();
-    auto tensor_builder = std::make_shared<TensorBuilder>();
-    auto stage_gen = std::make_shared<StageGenerator>(operands, tensor_builder);
-
-    _gen_map[config->id()] = {config, stage_gen};
+    fprintf(stderr, "BackendManager: unable to open function %s: %s", obj_creator_func_name.c_str(),
+            dlerror());
+    abort();
   }
 
-  // Add CPU backend
+  object_of_plugin_class.reset(allocate_obj(args...));
+}
+BackendManager::BackendManager(const neurun::graph::operand::Set &operands)
+{
+  // TODO handle plugin loading: TBD how
+  std::string plugins[] = {"libbackend_acl_cl.so", "libbackend_cpu.so"};
+  for (auto plugin : plugins)
   {
-    using namespace ::neurun::backend::cpu;
-    auto config = std::make_shared<Config>();
-    auto tensor_builder = std::make_shared<TensorBuilder>();
-    auto stage_gen = std::make_shared<StageGenerator>(operands, tensor_builder);
+    void *handle = dlopen(plugin.c_str(), RTLD_LAZY | RTLD_LOCAL);
+    if (handle == nullptr)
+    {
+      fprintf(stderr, "BackendManager %s: %s\n", plugin.c_str(), dlerror());
+      abort();
+    }
+
+    // load Config
+    std::shared_ptr<neurun::backend::IConfig> config;
+    loadObjectFromPlugin(config, std::string("allocate_Config"), handle);
+
+    // load TensorBuilder
+    std::shared_ptr<neurun::backend::ITensorBuilder> tensor_builder;
+    loadObjectFromPlugin(tensor_builder, std::string("allocate_TensorBuilder"), handle);
 
+    // load StageGenerator
+    std::shared_ptr<neurun::backend::IStageGenerator> stage_gen;
+    loadObjectFromPlugin(stage_gen, std::string("allocate_StageGenerator"), handle, operands,
+                         tensor_builder);
     _gen_map[config->id()] = {config, stage_gen};
   }
 }
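
Design notes on the hunk above: the two plugin file names are hard-coded for now (hence the TODO), each library is opened with RTLD_LAZY | RTLD_LOCAL so symbols are bound lazily and kept out of the global symbol table, and any failure to open a plugin or resolve a factory function aborts the process rather than silently continuing with fewer backends.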
index aeb4153..ed832dd 100644 (file)
@@ -61,6 +61,21 @@ public:
 
 private:
   std::map<std::string, Backend> _gen_map;
+  /**
+   * @brief Allocate an object of a plugin class by loading the plugin function
+   * that performs the allocation and calling it
+   *
+   * @param object_of_plugin_class target object
+   * @param obj_creator_func_name name of the plugin function that allocates the object
+   * @param handle handle of the plugin
+   * @param args arguments to pass to the constructor of the plugin class
+   *
+   * @return
+   */
+  template <typename T, class... Types>
+  void loadObjectFromPlugin(std::shared_ptr<T> &object_of_plugin_class,
+                            const std::string obj_creator_func_name, void *handle,
+                            Types &&... args);
 };
 
 } // namespace backend
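
Note that loadObjectFromPlugin is declared in the header but defined in BackendManager.cc; this links only because every instantiation of the template occurs inside that translation unit (in the constructor), so no other file needs the definition.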
index d64c23a..f1c17aa 100644 (file)
@@ -1,6 +1,6 @@
 file(GLOB_RECURSE SOURCES "*.cc")
 
-add_library(${LIB_NEURUN_BACKEND_ACL_CL} STATIC ${SOURCES})
+add_library(${LIB_NEURUN_BACKEND_ACL_CL} SHARED ${SOURCES})
 
 target_include_directories(${LIB_NEURUN_BACKEND_ACL_CL} PUBLIC ${NNFW_INCLUDE_DIR})
 target_include_directories(${LIB_NEURUN_BACKEND_ACL_CL} PUBLIC ${NEURUN_INCLUDE_DIR})
@@ -12,6 +12,5 @@ target_link_libraries(${LIB_NEURUN_BACKEND_ACL_CL} ${LIB_NEURUN_KERNEL_ACL_CL})
 
 target_compile_options(${LIB_NEURUN_BACKEND_ACL_CL} PRIVATE -Wall -Wextra -Werror)
 
-set_target_properties(${LIB_NEURUN_BACKEND_ACL_CL} PROPERTIES POSITION_INDEPENDENT_CODE ON)
 set_target_properties(${LIB_NEURUN_BACKEND_ACL_CL} PROPERTIES OUTPUT_NAME backend_acl_cl)
 install(TARGETS ${LIB_NEURUN_BACKEND_ACL_CL} DESTINATION lib/neurun)
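
Dropping the explicit POSITION_INDEPENDENT_CODE property is safe here: CMake compiles the sources of a SHARED library with position-independent code by default, so the switch from STATIC to SHARED makes the property redundant.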
diff --git a/runtimes/neurun/src/backend/acl_cl/PluginClassesAllocator.cc b/runtimes/neurun/src/backend/acl_cl/PluginClassesAllocator.cc
new file mode 100644 (file)
index 0000000..fdb37c1
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <memory>
+#include "TensorBuilder.h"
+#include "StageGenerator.h"
+#include "Config.h"
+#include "logging.h"
+
+extern "C" {
+neurun::backend::acl_cl::TensorBuilder *allocate_TensorBuilder()
+{
+  VERBOSE(allocate_TensorBuilder) << "loaded from acl_cl\n";
+  return new neurun::backend::acl_cl::TensorBuilder;
+}
+
+neurun::backend::acl_cl::StageGenerator *allocate_StageGenerator(
+    const neurun::graph::operand::Set &operand_ctx,
+    const std::shared_ptr<neurun::backend::acl_cl::TensorBuilder> &tensor_builder)
+{
+  VERBOSE(allocate_StageGenerator) << "loaded from acl_cl\n";
+  return new neurun::backend::acl_cl::StageGenerator(operand_ctx, tensor_builder);
+}
+
+neurun::backend::acl_cl::Config *allocate_Config()
+{
+  VERBOSE(allocate_Config) << "loaded from acl_cl\n";
+  return new neurun::backend::acl_cl::Config;
+}
+}
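
The factory functions are declared extern "C" so their symbol names stay unmangled ("allocate_TensorBuilder", "allocate_StageGenerator", "allocate_Config") and can be looked up by dlsym with exactly the names BackendManager passes in. The CPU plugin below exports the same three entry points, differing only in the concrete classes it returns.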
index 95e9af6..731aa31 100644 (file)
@@ -1,6 +1,6 @@
 file(GLOB_RECURSE SOURCES "*.cc")
 
-add_library(${LIB_NEURUN_BACKEND_CPU} STATIC ${SOURCES})
+add_library(${LIB_NEURUN_BACKEND_CPU} SHARED ${SOURCES})
 
 target_include_directories(${LIB_NEURUN_BACKEND_CPU} PUBLIC ${NNFW_INCLUDE_DIR})
 target_include_directories(${LIB_NEURUN_BACKEND_CPU} PUBLIC ${NEURUN_INCLUDE_DIR})
@@ -11,9 +11,10 @@ target_link_libraries(${LIB_NEURUN_BACKEND_CPU} tensorflow-lite)
 target_link_libraries(${LIB_NEURUN_BACKEND_CPU} nnfw_util)
 target_link_libraries(${LIB_NEURUN_BACKEND_CPU} nnfw_support_nnapi)
 target_link_libraries(${LIB_NEURUN_BACKEND_CPU} ${LIB_NEURUN_KERNEL_CPU})
+# TODO remove this line once acl dependency is removed from PermuteLayer
+target_link_libraries(${LIB_NEURUN_BACKEND_CPU} ${LIB_NEURUN_BACKEND_ACL_CL})
 
 target_compile_options(${LIB_NEURUN_BACKEND_CPU} PRIVATE -Wall -Wextra -Werror)
 
-set_target_properties(${LIB_NEURUN_BACKEND_CPU} PROPERTIES POSITION_INDEPENDENT_CODE ON)
 set_target_properties(${LIB_NEURUN_BACKEND_CPU} PROPERTIES OUTPUT_NAME backend_cpu)
 install(TARGETS ${LIB_NEURUN_BACKEND_CPU} DESTINATION lib/neurun)
diff --git a/runtimes/neurun/src/backend/cpu/PluginClassesAllocator.cc b/runtimes/neurun/src/backend/cpu/PluginClassesAllocator.cc
new file mode 100644 (file)
index 0000000..5e1a366
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <memory>
+#include "TensorBuilder.h"
+#include "StageGenerator.h"
+#include "Config.h"
+#include "logging.h"
+
+extern "C" {
+neurun::backend::cpu::TensorBuilder *allocate_TensorBuilder()
+{
+  VERBOSE(allocate_TensorBuilder) << "loaded from CPU\n";
+  return new neurun::backend::cpu::TensorBuilder;
+}
+
+neurun::backend::cpu::StageGenerator *
+allocate_StageGenerator(const neurun::graph::operand::Set &operand_ctx,
+                        const std::shared_ptr<neurun::backend::cpu::TensorBuilder> &tensor_builder)
+{
+  VERBOSE(allocate_StageGenerator) << "loaded from CPU\n";
+  return new neurun::backend::cpu::StageGenerator(operand_ctx, tensor_builder);
+}
+
+neurun::backend::cpu::Config *allocate_Config()
+{
+  VERBOSE(allocate_Config) << "loaded from CPU\n";
+  return new neurun::backend::cpu::Config;
+}
+}