Add gtest to prepare multithread execution instance (#5898)
author Hyeongseok Oh/On-Device Lab(SR)/Staff Engineer/Samsung Electronics <hseok82.oh@samsung.com>
Tue, 30 Jul 2019 07:21:41 +0000 (16:21 +0900)
committer GitHub Enterprise <noreply-CODE@samsung.com>
Tue, 30 Jul 2019 07:21:41 +0000 (16:21 +0900)
Introduce a gtest to prepare for multithreaded execution instances sharing one executor

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
runtimes/neurun/test/core/exec/ExecInstance.cc

index a363ef3..480389e 100644
@@ -15,6 +15,7 @@
  */
 
 #include <gtest/gtest.h>
+#include <thread>
 
 #include "graph/Graph.h"
 #include "model/Model.h"
@@ -225,6 +226,71 @@ TEST(ExecInstance, twoExecution)
 // exec_instance->setOutput(...);
 // exec_instance->execute();
 
+class Inference
+{
+public:
+  Inference(const float (&input1)[4], const float (&input2)[4], float (&output)[4],
+            std::shared_ptr<neurun::exec::IExecutor> &executor)
+      : _input1{input1}, _input2{input2}, _output{output}, _executor{executor}
+  {
+    // DO NOTHING
+  }
+
+  void inference(void)
+  {
+    auto input1 = IOIndex{0};
+    auto input2 = IOIndex{1};
+    auto output1 = IOIndex{0};
+
+    // Each thread builds its own Execution instance on the shared executor
+    neurun::exec::Execution execution{_executor};
+    execution.setInput(input1, reinterpret_cast<const void *>(_input1), 16);
+    execution.setInput(input2, reinterpret_cast<const void *>(_input2), 16);
+    execution.setOutput(output1, reinterpret_cast<void *>(_output), 16);
+
+    execution.execute();
+  }
+
+private:
+  const float (&_input1)[4];
+  const float (&_input2)[4];
+  float (&_output)[4];
+  std::shared_ptr<neurun::exec::IExecutor> &_executor;
+};
+
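+// DISABLED: running two Execution instances concurrently on one executor is not supported yet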
+TEST(ExecInstance, DISABLED_twoThreads)
+{
+  auto mockup = CompiledMockUpModel();
+  auto executor = mockup.executor;
+
+  const float exe1_input1_buffer[4] = {1, 0, -1, -2};
+  const float exe1_input2_buffer[4] = {1, -3, 2, -4};
+  float exe1_output_buffer[4] = {};
+  const float exe1_output_expected[4] = {5, -2, 0, -1};
+
+  Inference execution1{exe1_input1_buffer, exe1_input2_buffer, exe1_output_buffer, executor};
+
+  const float exe2_input1_buffer[4] = {2, 1, -2, 0};
+  const float exe2_input2_buffer[4] = {-3, 3, 1, 2};
+  float exe2_output_buffer[4] = {};
+  const float exe2_output_expected[4] = {2, 5, -2, 7};
+
+  Inference execution2{exe2_input1_buffer, exe2_input2_buffer, exe2_output_buffer, executor};
+
+  std::thread t1{&Inference::inference, &execution1};
+  std::thread t2{&Inference::inference, &execution2};
+
+  t1.join();
+  t2.join();
+
+  for (auto i = 0; i < 4; i++)
+  {
+    EXPECT_EQ(exe1_output_buffer[i], exe1_output_expected[i]);
+    EXPECT_EQ(exe2_output_buffer[i], exe2_output_expected[i]);
+  }
+}
+
 // TODO 2: Support asynchronous execution
 //         If executor(backend) can support, execute() makes new thread and return
 //         and support wait function
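
For reference, a rough sketch of how the TODO 2 usage might look once asynchronous
execution lands. This is an assumption based on the TODO above, not the current API:
today neurun::exec::Execution only provides a blocking execute(), and wait() below is
a hypothetical name for the wait function the TODO mentions.

    // Hypothetical asynchronous flow (assumed API, per TODO 2 above).
    // wait() is a placeholder name, not an existing method.
    void async_inference_sketch(std::shared_ptr<neurun::exec::IExecutor> executor,
                                const float (&input1)[4], const float (&input2)[4],
                                float (&output)[4])
    {
      neurun::exec::Execution execution{executor};
      execution.setInput(IOIndex{0}, reinterpret_cast<const void *>(input1), 16);
      execution.setInput(IOIndex{1}, reinterpret_cast<const void *>(input2), 16);
      execution.setOutput(IOIndex{0}, reinterpret_cast<void *>(output), 16);

      execution.execute(); // would start a worker thread and return immediately
      // ... the caller could overlap other work here ...
      execution.wait();    // hypothetical: blocks until the output is ready
    }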