#include "exec/IExecutor.h"
#include "IODescription.h"
+#include <thread>
+
namespace neurun
{
namespace exec
/**
* @brief Register an output buffer for the given output index
* @note Buffer/length describe caller-owned memory; it must stay valid
* until execution (sync or async) has finished
*/
void setOutput(const model::IOIndex &index, const model::TypeInfo &type,
const model::Shape &shape, void *buffer, size_t length);
/**
- * @brief Start execution
+ * @brief Execute the model synchronously on the caller's thread
* @note It should be called after setting input and output buffer
*/
void execute();
+ /**
+ * @brief Start asynchronous execution
+ * @note It returns after execution thread is started
+ * It should be called after setting input and output buffer
+ */
+ void startExecute(void);
+
+ /**
+ * @brief Return when execution is finished
+ * @note It waits until execution is finished
+ */
+ void waitFinish(void);
+
private:
// Executor that actually runs the model (set at construction, never rebound)
const std::shared_ptr<IExecutor> _executor;
// Input/output buffer descriptions registered via setInput()/setOutput()
IODescription _io_desc;
+ // Worker thread created by startExecute(); joined in waitFinish()
+ std::unique_ptr<std::thread> _exec_thread;
};
} // namespace exec
#include "exec/Execution.h"
+#include "util/logging.h"
+
namespace neurun
{
namespace exec
std::move(nnfw::cpp14::make_unique<OutputDesc>(info, buffer, length));
}
-void Execution::execute() { _executor->execute(_io_desc); }
+// Synchronous execution: runs on the caller's thread using the I/O buffers
+// previously registered into _io_desc via setInput()/setOutput().
+// Also used as the thread entry point by startExecute().
+void Execution::execute()
+{
+  VERBOSE(Execution) << "Start execution" << std::endl;
+
+  _executor->execute(_io_desc);
+
+  VERBOSE(Execution) << "Execution finished" << std::endl;
+}
+
+// Asynchronous execution: spawns a worker thread running execute() and
+// returns immediately. Call waitFinish() to synchronize with completion.
+void Execution::startExecute()
+{
+  VERBOSE(Execution) << "Create asynchronous execution thread" << std::endl;
+
+  // If a previous asynchronous run is still outstanding, join it first:
+  // overwriting _exec_thread would destroy a joinable std::thread, which
+  // calls std::terminate().
+  if (_exec_thread && _exec_thread->joinable())
+    _exec_thread->join();
+
+  _exec_thread = nnfw::cpp14::make_unique<std::thread>(&Execution::execute, this);
+}
+
+// Blocks until the asynchronous execution started by startExecute() has
+// finished. Safe to call when no execution is in flight.
+void Execution::waitFinish()
+{
+  VERBOSE(Execution) << "Wait to finish execution" << std::endl;
+
+  // Guard: calling waitFinish() without a prior startExecute() would
+  // dereference a null _exec_thread, and a second waitFinish() would make
+  // join() throw std::system_error (thread already joined).
+  if (_exec_thread && _exec_thread->joinable())
+    _exec_thread->join();
+}
} // namespace exec
} // namespace neurun
}
}
-// TODO: Support asynchronous execution
-// If executor(backend) can support, execute() makes new thread and return
-// and support wait function
-// Otherwise, instance run until execution end and return (wait function do nothing)
-//
-// auto exec_instance1 = new neurun::exec::ExecInstance{executor};
-// exec_instance1->setInput(...);
-// exec_instance1->setOutput(...);
-// exec_instance1->execute();
-// auto exec_instance2 = new neurun::exec::ExecInstance{executor};
-// exec_instance2->setInput(...);
-// exec_instance2->setOutput(...);
-// exec_instance2->execute();
-// exec_instance1->wait();
-// ... use output of exec_instance1
-// exec_instance1->setInput(...);
-// exec_instance1->setOutput(...);
-// exec_instance2->wait();
-// ... use output of exec_instance2
-// exec_instance2->setInput(...);
-// exec_instance2->setOutput(...);
-// exec_instance1->wait();
-// ... use output of exec_instance1
-// exec_instance2->wait();
-// ... use output of exec_instance2
+// Verify asynchronous execution: startExecute() returns immediately and
+// waitFinish() blocks until the output buffer is populated.
+TEST(ExecInstance, async)
+{
+  auto mockup = CompiledMockUpModel();
+  auto graph = mockup.graph;
+  auto executor = mockup.executor;
+
+  auto input1 = IOIndex{0};
+  auto input2 = IOIndex{1};
+  auto output = IOIndex{0};
+
+  const float input1_buffer[4] = {1, 0, -1, -2};
+  const float input2_buffer[4] = {1, -3, 2, -4};
+  float output_buffer[4] = {};
+  const float output_expected[4] = {5, -2, 0, -1};
+
+  // Stack allocation (RAII) instead of raw new/delete: the object is
+  // destroyed even if an assertion aborts the test body, so no leak.
+  neurun::exec::Execution execution{executor};
+
+  execution.setInput(input1, reinterpret_cast<const void *>(input1_buffer), 16);
+  execution.setInput(input2, reinterpret_cast<const void *>(input2_buffer), 16);
+  execution.setOutput(output, reinterpret_cast<void *>(output_buffer), 16);
+  // Start in the background, then block until the result is available.
+  execution.startExecute();
+  execution.waitFinish();
+
+  for (auto i = 0; i < 4; i++)
+  {
+    EXPECT_EQ(output_buffer[i], output_expected[i]);
+  }
+}
} // namespace