[neurun] Use mutex on ExecutorBase (#6131)
author오형석/On-Device Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Fri, 2 Aug 2019 05:54:34 +0000 (14:54 +0900)
committer이한종/On-Device Lab(SR)/Engineer/삼성전자 <hanjoung.lee@samsung.com>
Fri, 2 Aug 2019 05:54:34 +0000 (14:54 +0900)
* Use mutex on ExecutorBase

Use mutex on ExecutorBase for thread-safe executor
Enable multi-thread execution test

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
* Use std::lock_guard

runtimes/neurun/core/src/exec/ExecutorBase.cc
runtimes/neurun/core/src/exec/ExecutorBase.h
runtimes/neurun/test/core/exec/ExecInstance.cc

index 3ed3abf..8590387 100644 (file)
@@ -28,7 +28,7 @@ ExecutorBase::ExecutorBase(const std::shared_ptr<const model::Model> &model,
                            std::unique_ptr<backend::MemoryManagerSet> mem_mgrs)
     : _observers(), _model{model}, _subgraphs{std::move(subgraphs)},
       _operand_context{operand_context}, _lower_info{std::move(lower_info)},
-      _mem_mgrs{std::move(mem_mgrs)}
+      _mem_mgrs{std::move(mem_mgrs)}, _mutex()
 {
   // DO NOTHING
 }
@@ -76,6 +76,11 @@ std::unique_ptr<ISink> ExecutorBase::sink(const model::IOIndex &index, const mod
 
 void ExecutorBase::execute(const IODescription &desc)
 {
+  // For thread-safe, use mutex
+  // TODO: if all used backends on this executor are thread-safe,
+  //       do not need to use mutex (otherwise, use mutex)
+  std::lock_guard<std::mutex> lock(_mutex);
+
   std::vector<std::unique_ptr<ISource>> sources{_model->inputs.size()};
   std::vector<std::unique_ptr<ISink>> sinks{_model->outputs.size()};
 
index 7b8fefd..ef66111 100644 (file)
@@ -17,6 +17,8 @@
 #ifndef __NEURUN_EXEC_EXECUTOR_BASE_H__
 #define __NEURUN_EXEC_EXECUTOR_BASE_H__
 
+#include <mutex>
+
 #include "Source.h"
 #include "exec/ExecutionObservers.h"
 #include "Sink.h"
@@ -113,6 +115,7 @@ protected:
   std::shared_ptr<compiler::OperandContext> _operand_context;
   std::unique_ptr<graph::LowerInfoMap> _lower_info;
   std::unique_ptr<backend::MemoryManagerSet> _mem_mgrs;
+  std::mutex _mutex;
 };
 
 } // namespace exec
index 07725ef..35dd527 100644 (file)
@@ -211,22 +211,6 @@ TEST(ExecInstance, twoExecution)
   delete execution2;
 }
 
-// TODO 1: Support multi-thread execution
-//         If executor(backend) can support, these instances run independently
-//         Otherwise, execute() shuold wait other execution end (need lock/unlock mutex)
-//
-// ... in thread1
-// auto exec_instance = new neurun::exec::ExecInstance{executor};
-// exec_instance->setInput(...);
-// exec_instance->setOutput(...);
-// exec_instance->execute();
-//
-// .. in thread2
-// auto exec_instance = new neurun::exec::ExecInstance{executor};
-// exec_instance->setInput(...);
-// exec_instance->setOutput(...);
-// exec_instance->execute();
-
 class Inference
 {
 public:
@@ -260,7 +244,8 @@ private:
   std::shared_ptr<neurun::exec::IExecutor> &_executor;
 };
 
-TEST(ExecInstance, DISABLED_twoThreads)
+// Support multi-thread execution
+TEST(ExecInstance, twoThreads)
 {
   auto mockup = CompiledMockUpModel();
   auto executor = mockup.executor;
@@ -292,10 +277,10 @@ TEST(ExecInstance, DISABLED_twoThreads)
   }
 }
 
-// TODO 2: Support asynchronous execution
-//         If executor(backend) can support, execute() makes new thread and return
-//         and support wait function
-//         Otherwise, instance run until execution end and return (wait function do nothing)
+// TODO: Support asynchronous execution
+//       If executor(backend) can support, execute() makes new thread and return
+//       and support wait function
+//       Otherwise, instance run until execution end and return (wait function do nothing)
 //
 // auto exec_instance1 = new neurun::exec::ExecInstance{executor};
 // exec_instance1->setInput(...);