[nnfw] Rename singleton object getter (#8853)
author Hanjoung Lee/On-Device Lab(SR)/Engineer/Samsung Electronics <hanjoung.lee@samsung.com>
Fri, 8 Nov 2019 04:29:33 +0000 (13:29 +0900)
committer Hyeongseok Oh/On-Device Lab(SR)/Staff Engineer/Samsung Electronics <hseok82.oh@samsung.com>
Fri, 8 Nov 2019 04:29:33 +0000 (13:29 +0900)
Rename singleton objects' getter method `instance` to `get`, since it is shorter.

Signed-off-by: Hanjoung Lee <hanjoung.lee@samsung.com>
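
For reference, every renamed accessor follows the Meyers-singleton shape shown in the hunks below. A minimal standalone sketch of the pattern (the `Config` class name here is hypothetical, used only for illustration):

  class Config
  {
  public:
    // Renamed accessor: returns the lazily-constructed singleton.
    // The local static is initialized on first call and is
    // thread-safe since C++11 ("magic statics").
    static Config &get()
    {
      static Config instance;
      return instance;
    }

  private:
    Config() = default; // construction only through get()
  };

  // Call sites change from Config::instance() to Config::get().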
19 files changed:
runtime/libs/tflite/include/tflite/TensorLogger.h
runtime/libs/tflite/src/Diff.cpp
runtime/neurun/backend/cpu/MemoryManager.cc
runtime/neurun/backend/cpu/MemoryPlannerFactory.cc
runtime/neurun/backend/cpu/MemoryPlannerFactory.h
runtime/neurun/backend/srcn/MemoryManager.cc
runtime/neurun/backend/srcn/MemoryPlannerFactory.cc
runtime/neurun/backend/srcn/MemoryPlannerFactory.h
runtime/neurun/core/src/backend/BackendManager.cc
runtime/neurun/core/src/backend/BackendManager.h
runtime/neurun/core/src/compiler/Compiler.cc
runtime/neurun/core/src/compiler/ExecutorFactory.cc
runtime/neurun/core/src/compiler/ExecutorFactory.h
runtime/neurun/core/src/compiler/ManualScheduler.cc
runtime/neurun/core/src/dumper/dot/DotDumper.cc
runtime/neurun/core/src/graph/pass/PermutationInsertionPass.cc
runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.cc
runtime/neurun/frontend/nnapi/wrapper/OperationFactory.cc
runtime/neurun/frontend/nnapi/wrapper/OperationFactory.h

index 7b3363b..a824c34 100644 (file)
@@ -61,7 +61,7 @@ public:
    * @brief Get TensorLogger instance
    * @return The TensorLogger instance
    */
-  static TensorLogger &instance()
+  static TensorLogger &get()
   {
     static TensorLogger instance;
     return instance;
index b753120..1a3ac85 100644 (file)
@@ -581,7 +581,7 @@ int RandomTestRunner::run(const nnfw::tflite::Builder &builder)
   std::cout << "[NNAPI TEST] PASSED" << std::endl;
 
   if (_param.tensor_logging)
-    nnfw::tflite::TensorLogger::instance().save(_param.log_path, *tfl_interp);
+    nnfw::tflite::TensorLogger::get().save(_param.log_path, *tfl_interp);
 
   return 0;
 }
index 192a6db..33f1ddc 100644 (file)
@@ -38,7 +38,7 @@ MemoryManager::MemoryManager() : _mem_planner{createMemoryPlanner()}
 IMemoryPlanner *MemoryManager::createMemoryPlanner()
 {
   auto planner_id = util::getConfigString(util::config::CPU_MEMORY_PLANNER);
-  return MemoryPlannerFactory::instance().create(planner_id);
+  return MemoryPlannerFactory::get().create(planner_id);
 }
 
 void MemoryManager::buildTensor(const model::OperandIndex &ind, const model::OperandInfo &info)
index fb3856e..4a2bfa1 100644 (file)
@@ -25,7 +25,7 @@ namespace backend
 namespace cpu
 {
 
-MemoryPlannerFactory &MemoryPlannerFactory::instance()
+MemoryPlannerFactory &MemoryPlannerFactory::get()
 {
   static MemoryPlannerFactory instance;
   return instance;
index 610cc9a..ea577ce 100644 (file)
@@ -29,7 +29,7 @@ namespace cpu
 class MemoryPlannerFactory
 {
 public:
-  static MemoryPlannerFactory &instance();
+  static MemoryPlannerFactory &get();
 
 private:
   MemoryPlannerFactory() = default;
index 10246fe..d05dd2f 100644 (file)
@@ -38,7 +38,7 @@ MemoryManager::MemoryManager() : _mem_planner{createMemoryPlanner()}
 IMemoryPlanner *MemoryManager::createMemoryPlanner()
 {
   auto planner_id = util::getConfigString(util::config::CPU_MEMORY_PLANNER);
-  return MemoryPlannerFactory::instance().create(planner_id);
+  return MemoryPlannerFactory::get().create(planner_id);
 }
 
 void MemoryManager::buildTensor(const model::OperandIndex &ind, const model::OperandInfo &info,
index 0029c38..57dbbcc 100644 (file)
@@ -23,7 +23,7 @@ namespace backend
 namespace srcn
 {
 
-MemoryPlannerFactory &MemoryPlannerFactory::instance()
+MemoryPlannerFactory &MemoryPlannerFactory::get()
 {
   static MemoryPlannerFactory instance;
   return instance;
index 79cb264..13bbd96 100644 (file)
@@ -29,7 +29,7 @@ namespace srcn
 class MemoryPlannerFactory
 {
 public:
-  static MemoryPlannerFactory &instance();
+  static MemoryPlannerFactory &get();
 
 private:
   MemoryPlannerFactory() = default;
index 4494ba0..4004932 100644 (file)
@@ -28,7 +28,7 @@ namespace neurun
 namespace backend
 {
 
-BackendManager &BackendManager::instance()
+BackendManager &BackendManager::get()
 {
   static BackendManager object;
   return object;
index ef10212..ee1a9d7 100644 (file)
@@ -34,7 +34,7 @@ public:
   using backend_create_t = Backend *(*)();
   using backend_destroy_t = void (*)(Backend *);
 
-  static BackendManager &instance();
+  static BackendManager &get();
 
 public:
   Backend *get(const std::string &key);
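
Note that after this rename `BackendManager` overloads `get`: the new static singleton accessor `get()` coexists with the existing member lookup `get(const std::string &key)`, which is why chained calls such as `BackendManager::get().get("cpu")` appear in the hunks below. A hedged standalone sketch of why this is unambiguous (the `Backend` stub and `_map` member are simplified placeholders, not the real implementation):

  #include <string>
  #include <unordered_map>

  struct Backend {}; // simplified stand-in for backend::Backend

  class BackendManager
  {
  public:
    // Static singleton accessor (renamed from instance()).
    static BackendManager &get()
    {
      static BackendManager object;
      return object;
    }

    // Member lookup; legally overloads the static get() because the
    // parameter lists differ. Returns nullptr for unknown keys
    // (operator[] default-inserts a null entry).
    Backend *get(const std::string &key) { return _map[key]; }

  private:
    std::unordered_map<std::string, Backend *> _map;
  };

  // With no argument the static overload is chosen; with a string
  // argument the member overload is chosen:
  //   Backend *cpu = BackendManager::get().get("cpu");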
index 3dbf5e5..aef9514 100644 (file)
@@ -67,9 +67,8 @@ void Compiler::compile(void)
   std::shared_ptr<model::OperationIndexMap<int64_t>> indexed_ranks;
   if (util::getConfigBool(util::config::USE_SCHEDULER))
   {
-    auto scheduler =
-        compiler::HEScheduler(_graph->operands(), backend::BackendManager::instance().getAll(),
-                              _graph->getKernelBuilder());
+    auto scheduler = compiler::HEScheduler(
+        _graph->operands(), backend::BackendManager::get().getAll(), _graph->getKernelBuilder());
     br = scheduler.schedule(*_graph);
     indexed_ranks = scheduler.getIndexedRanks();
   }
@@ -98,7 +97,7 @@ void Compiler::compile(void)
   const std::string executor_str = util::getConfigString(util::config::EXECUTOR);
 
   _executor =
-      std::shared_ptr<exec::IExecutor>{ExecutorFactory::instance().create(executor_str, *_graph)};
+      std::shared_ptr<exec::IExecutor>{ExecutorFactory::get().create(executor_str, *_graph)};
   _executor->setIndexedRanks(indexed_ranks);
   /********************************
    * Code generation phase finished
index 404e16b..745b9dd 100644 (file)
@@ -37,7 +37,7 @@ namespace neurun
 namespace compiler
 {
 
-ExecutorFactory &ExecutorFactory::instance()
+ExecutorFactory &ExecutorFactory::get()
 {
   static ExecutorFactory singleton;
   return singleton;
@@ -154,7 +154,7 @@ exec::IExecutor *ExecutorFactory::createLinearExecutor(graph::Graph &graph)
   // TODO Add optimization passes
 
   // Initialize constant tensors
-  for (const auto backend : backend::BackendManager::instance().getAll())
+  for (const auto backend : backend::BackendManager::get().getAll())
   {
     linear->getBackendContext(backend)->constant_initializer->run();
   }
@@ -314,7 +314,7 @@ exec::IExecutor *ExecutorFactory::createDataflowExecutor(graph::Graph &graph, bo
   }
 
   // Initialize constant tensors
-  for (const auto backend : backend::BackendManager::instance().getAll())
+  for (const auto backend : backend::BackendManager::get().getAll())
   {
     graph.backend_resolver()->getBackendContext(backend)->constant_initializer->run();
   }
@@ -377,7 +377,7 @@ exec::IExecutor *ExecutorFactory::createDataflowExecutor(graph::Graph &graph, bo
                                            std::move(tensor_mgrs), std::move(code_map)};
     if (util::getConfigBool(util::config::PROFILING_MODE))
     {
-      auto et = std::make_shared<backend::ExecTime>(backend::BackendManager::instance().getAll());
+      auto et = std::make_shared<backend::ExecTime>(backend::BackendManager::get().getAll());
       std::unique_ptr<exec::IExecutionObserver> obs =
           nnfw::cpp14::make_unique<exec::ProfileObserver>(et);
       exec->addObserver(std::move(obs));
index 894fec1..bec25b1 100644 (file)
@@ -30,7 +30,7 @@ namespace compiler
 class ExecutorFactory
 {
 public:
-  static ExecutorFactory &instance();
+  static ExecutorFactory &get();
 
 public:
   exec::IExecutor *create(const std::string &id, graph::Graph &graph);
index a9d2297..53f06ce 100644 (file)
@@ -30,7 +30,7 @@ namespace compiler
 std::unique_ptr<BackendResolver> ManualScheduler::schedule(const graph::Graph &graph)
 {
   auto backend_resolver = nnfw::cpp14::make_unique<compiler::BackendResolver>(
-      graph.operands(), backend::BackendManager::instance().getAll(), graph.getKernelBuilder());
+      graph.operands(), backend::BackendManager::get().getAll(), graph.getKernelBuilder());
 
   // 1. Backend for All operations
   auto backend_all_str = util::getConfigString(util::config::OP_BACKEND_ALLOPS);
@@ -44,7 +44,7 @@ std::unique_ptr<BackendResolver> ManualScheduler::schedule(const graph::Graph &g
     while (pos != std::string::npos)
     {
       backend_all_str = backends_str.substr(prev_pos, pos - prev_pos);
-      backend_all = backend::BackendManager::instance().get(backend_all_str);
+      backend_all = backend::BackendManager::get().get(backend_all_str);
       if (backend_all != nullptr)
         break;
 
@@ -55,12 +55,12 @@ std::unique_ptr<BackendResolver> ManualScheduler::schedule(const graph::Graph &g
     if (backend_all == nullptr && prev_pos < backends_str.size())
     {
       backend_all_str = backends_str.substr(prev_pos);
-      backend_all = backend::BackendManager::instance().get(backend_all_str);
+      backend_all = backend::BackendManager::get().get(backend_all_str);
     }
   }
   else
   {
-    backend_all = backend::BackendManager::instance().get(backend_all_str);
+    backend_all = backend::BackendManager::get().get(backend_all_str);
   }
 
   VERBOSE(ManualScheduler) << "Default backend for all ops: " << backend_all_str << std::endl;
@@ -72,13 +72,13 @@ std::unique_ptr<BackendResolver> ManualScheduler::schedule(const graph::Graph &g
   // 2. Backend per operation type
   std::unordered_map<std::type_index, backend::Backend *> op_type_map;
   // By default, Custom uses cpu backend
-  op_type_map[typeid(model::operation::Custom)] = backend::BackendManager::instance().get("cpu");
+  op_type_map[typeid(model::operation::Custom)] = backend::BackendManager::get().get("cpu");
 #define OP(InternalName)                                                                      \
   {                                                                                           \
     const auto &backend_str = util::getConfigString(util::config::OP_BACKEND_##InternalName); \
     if (!backend_str.empty())                                                                 \
     {                                                                                         \
-      auto backend = backend::BackendManager::instance().get(backend_str);                    \
+      auto backend = backend::BackendManager::get().get(backend_str);                         \
       VERBOSE(Lower) << "backend for " << #InternalName << ": " << backend_str << std::endl;  \
       op_type_map[typeid(model::operation::InternalName)] = backend;                          \
     }                                                                                         \
@@ -114,7 +114,7 @@ std::unique_ptr<BackendResolver> ManualScheduler::schedule(const graph::Graph &g
 
       graph.operations().at(model::OperationIndex{key}); // Check if it exists, or this will throw
       backend_resolver->setBackend(model::OperationIndex{key},
-                                   backend::BackendManager::instance().get(val));
+                                   backend::BackendManager::get().get(val));
     }
   }
   catch (...)
@@ -125,7 +125,7 @@ std::unique_ptr<BackendResolver> ManualScheduler::schedule(const graph::Graph &g
 
   // 4. Operations that are specially handled
   //    All configuration above will be ignored(overwritten)
-  op_type_map[typeid(model::operation::Permute)] = backend::BackendManager::instance().get("cpu");
+  op_type_map[typeid(model::operation::Permute)] = backend::BackendManager::get().get("cpu");
 
   // Dump final assignment
   backend_resolver->iterate(
index fe006a9..91f8cdb 100644 (file)
@@ -67,7 +67,7 @@ void DotDumper::dump(const std::string &tag)
     static const auto map = []() {
       std::unordered_map<const backend::Backend *, std::string> ret;
       uint32_t index = 1; // Start from 1 to avoid 0(red) which is too dark :(
-      for (const auto backend : backend::BackendManager::instance().getAll())
+      for (const auto backend : backend::BackendManager::get().getAll())
       {
         ret.emplace(backend, Node::BG_COLORS[index]);
         index = (index + 1) % (sizeof(Node::BG_COLORS) / sizeof(Node::BG_COLORS[0]));
index 9b661cc..7c9668b 100644 (file)
@@ -141,7 +141,7 @@ PermutationInsertionPass::insertPermute(const model::OperandIndex &operand_index
   // NOTE Permute may not have specific layout because the layout of input and output may be
   // different.
   const auto permute_node_layout = model::Layout::UNKNOWN;
-  const auto permute_node_backend = backend::BackendManager::instance().getDefault();
+  const auto permute_node_backend = backend::BackendManager::get().getDefault();
   const operand::PermuteFactor permute_node_factor{permute_node_backend, permute_node_layout};
 
   // Update LowerInfo of input operand
index e3bb291..029411f 100644 (file)
@@ -101,7 +101,7 @@ bool ANeuralNetworksModel::addOperation(ANeuralNetworksOperationType type, uint3
       _operand_usages[outputs[i]] = OperandUsage::OPERATION_OUTPUT;
     }
 
-    auto &factory = OperationFactory::instance();
+    auto &factory = OperationFactory::get();
     OperationFactory::Param param{inputCount, inputs, outputCount, outputs};
 
     auto node = factory.create(type, param, _model->operands);
@@ -128,7 +128,7 @@ bool ANeuralNetworksModel::addOperationEx(ANeuralNetworksOperationTypeEx type, u
       _operand_usages[outputs[i]] = OperandUsage::OPERATION_OUTPUT;
     }
 
-    auto &factory = OperationFactory::instance();
+    auto &factory = OperationFactory::get();
     OperationFactory::Param param{inputCount, inputs, outputCount, outputs};
 
     auto node = factory.create(type, param, _model->operands);
index 95193b8..7b306b6 100644 (file)
@@ -84,7 +84,7 @@ uint32_t getUint32Scalar(Operands &operands, const OperandIndex index)
 
 } // namespace
 
-OperationFactory &OperationFactory::instance()
+OperationFactory &OperationFactory::get()
 {
   static OperationFactory factory;
   return factory;
index ae2a530..4d5d02f 100644 (file)
@@ -43,7 +43,7 @@ public:
                                                              neurun::model::Operands &)>;
 
 public:
-  static OperationFactory &instance();
+  static OperationFactory &get();
 
 private:
   OperationFactory();