std::unique_ptr<backend::MemoryManagerSet> mem_mgrs)
: _observers(), _model{model}, _subgraphs{std::move(subgraphs)},
_operand_context{operand_context}, _lower_info{std::move(lower_info)},
- _mem_mgrs{std::move(mem_mgrs)}
+ _mem_mgrs{std::move(mem_mgrs)}, _mutex()
{
// DO NOTHING
}
void ExecutorBase::execute(const IODescription &desc)
{
+ // For thread safety, guard execution with a mutex
+ // TODO: if all backends used by this executor are thread-safe,
+ // the mutex is not needed (otherwise, keep using the mutex)
+ std::lock_guard<std::mutex> lock(_mutex);
+
std::vector<std::unique_ptr<ISource>> sources{_model->inputs.size()};
std::vector<std::unique_ptr<ISink>> sinks{_model->outputs.size()};
#ifndef __NEURUN_EXEC_EXECUTOR_BASE_H__
#define __NEURUN_EXEC_EXECUTOR_BASE_H__
+#include <mutex>
+
#include "Source.h"
#include "exec/ExecutionObservers.h"
#include "Sink.h"
std::shared_ptr<compiler::OperandContext> _operand_context;
std::unique_ptr<graph::LowerInfoMap> _lower_info;
std::unique_ptr<backend::MemoryManagerSet> _mem_mgrs;
+ std::mutex _mutex;
};
} // namespace exec
delete execution2;
}
-// TODO 1: Support multi-thread execution
-// If executor(backend) can support, these instances run independently
-// Otherwise, execute() shuold wait other execution end (need lock/unlock mutex)
-//
-// ... in thread1
-// auto exec_instance = new neurun::exec::ExecInstance{executor};
-// exec_instance->setInput(...);
-// exec_instance->setOutput(...);
-// exec_instance->execute();
-//
-// .. in thread2
-// auto exec_instance = new neurun::exec::ExecInstance{executor};
-// exec_instance->setInput(...);
-// exec_instance->setOutput(...);
-// exec_instance->execute();
-
class Inference
{
public:
std::shared_ptr<neurun::exec::IExecutor> &_executor;
};
-TEST(ExecInstance, DISABLED_twoThreads)
+// Support multi-thread execution
+TEST(ExecInstance, twoThreads)
{
auto mockup = CompiledMockUpModel();
auto executor = mockup.executor;
}
}
-// TODO 2: Support asynchronous execution
-// If executor(backend) can support, execute() makes new thread and return
-// and support wait function
-// Otherwise, instance run until execution end and return (wait function do nothing)
+// TODO: Support asynchronous execution
+// If executor(backend) can support, execute() makes new thread and return
+// and support wait function
+// Otherwise, instance run until execution end and return (wait function do nothing)
//
// auto exec_instance1 = new neurun::exec::ExecInstance{executor};
// exec_instance1->setInput(...);