[neurun] Introduced scheduler tests (#6789)
author Ivan Vagin/AI Tools Lab/SRR/Engineer/Samsung Electronics <ivan.vagin@samsung.com>
Thu, 22 Aug 2019 23:18:43 +0000 (08:18 +0900)
committer Hanjoung Lee/On-Device Lab(SR)/Engineer/Samsung Electronics <hanjoung.lee@samsung.com>
Thu, 22 Aug 2019 23:18:43 +0000 (08:18 +0900)
* [neurun] Introduced scheduler tests

* Implemented infrastructure for scheduler tests
* Implemented scheduler test with a straight graph and known execution time

Signed-off-by: Ivan Vagin <ivan.vagin@samsung.com>
* Prevented memory leak

Signed-off-by: Ivan Vagin <ivan.vagin@samsung.com>
* Run test three times - once for each executor

Signed-off-by: Ivan Vagin <ivan.vagin@samsung.com>
runtimes/neurun/test/core/compiler/Scheduler.cc [new file with mode: 0644]

diff --git a/runtimes/neurun/test/core/compiler/Scheduler.cc b/runtimes/neurun/test/core/compiler/Scheduler.cc
new file mode 100644 (file)
index 0000000..5e5cb6c
--- /dev/null
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <compiler/Scheduler.h>
+#include <backend/ExecTime.h>
+
+#include <model/Model.h>
+#include <model/Shape.h>
+#include <model/InternalType.h>
+#include <model/TypeInfo.h>
+#include <model/DataType.h>
+
+#include <model/operation/AddNode.h>
+#include <model/operation/MulNode.h>
+#include <model/operation/FullyConnectedNode.h>
+
+#include <cstdio>  // remove
+#include <cstdlib> // std::getenv, setenv
+
+#include <gtest/gtest.h>
+
+namespace
+{
+using namespace neurun;
+using namespace model;
+using namespace backend;
+
+//
+// Mock backends classes
+//
+
+struct MockConfigCPU : public IConfig
+{
+  std::string id() override { return "cpu"; }
+  void initialize() override {}
+  bool SupportSubTensorAlloc() override { return false; }
+};
+
+struct MockBackendCPU : public Backend
+{
+  std::shared_ptr<IConfig> config() const override { return std::make_shared<MockConfigCPU>(); }
+  std::unique_ptr<BackendContext> newContext(const Operands &) const override
+  {
+    return std::unique_ptr<BackendContext>(
+        new BackendContext{this, nullptr, nullptr, nullptr, nullptr});
+  }
+};
+
+struct MockConfigGPU : public IConfig
+{
+  std::string id() override { return "gpu"; }
+  void initialize() override {}
+  bool SupportSubTensorAlloc() override { return false; }
+};
+
+struct MockBackendGPU : public Backend
+{
+  std::shared_ptr<IConfig> config() const override { return std::make_shared<MockConfigGPU>(); }
+  std::unique_ptr<BackendContext> newContext(const Operands &) const override
+  {
+    return std::unique_ptr<BackendContext>(
+        new BackendContext{this, nullptr, nullptr, nullptr, nullptr});
+  }
+};
+
+struct MockConfigNPU : public IConfig
+{
+  std::string id() override { return "npu"; }
+  void initialize() override {}
+  bool SupportSubTensorAlloc() override { return false; }
+};
+
+struct MockBackendNPU : public Backend
+{
+  std::shared_ptr<IConfig> config() const override { return std::make_shared<MockConfigNPU>(); }
+  std::unique_ptr<BackendContext> newContext(const Operands &) const override
+  {
+    return std::unique_ptr<BackendContext>(
+        new BackendContext{this, nullptr, nullptr, nullptr, nullptr});
+  }
+};
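+
+// Note: the mocks above expose only a backend id and a context full of nulls.
+// That should be enough for these tests, which exercise scheduling decisions
+// only and never run real kernels.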
+
+//
+// Constants
+//
+
+const int OPERAND_ELEMS = 268203;
+const int OPERAND_SIZE = OPERAND_ELEMS * 4; // FLOAT32 operands: 4 bytes per element
+
+const std::string LINEAR("Linear");
+const std::string DATAFLOW("Dataflow");
+const std::string PARALLEL("Parallel");
+
+//
+// Helper functions
+//
+
+// Set executor through environment variable
+void setExecutor(const std::string &executor) { setenv("EXECUTOR", executor.c_str(), true); }
+
+// Calculate operation size by adding the sizes of all its input and output operands
+uint32_t calcOpSize(const graph::Graph &graph, const OperationIndex &op_idx)
+{
+  uint32_t size = 0;
+  for (const auto &input : graph.operations().at(op_idx).getInputs())
+    size += graph.operands().at(input).info().total_size();
+  for (const auto &output : graph.operations().at(op_idx).getOutputs())
+    size += graph.operands().at(output).info().total_size();
+  return size;
+}
+
+// Create straight graph: Add->Mul->FullyConnected
+std::unique_ptr<graph::Graph> createStraightGraph()
+{
+  auto graph = nnfw::cpp14::make_unique<graph::Graph>(nnfw::cpp14::make_unique<Model>());
+
+  // Create operands
+  const TypeInfo float_op(DataType::FLOAT32);
+  auto add_lhs_index = graph->addOperand(Shape{OPERAND_ELEMS}, float_op);
+  auto add_rhs_index = graph->addOperand(Shape{OPERAND_ELEMS}, float_op);
+  auto add_out_index = graph->addOperand(Shape{OPERAND_ELEMS}, float_op);
+  auto mul_lhs_index = graph->addOperand(Shape{OPERAND_ELEMS}, float_op);
+  auto mul_out_index = graph->addOperand(Shape{OPERAND_ELEMS}, float_op);
+  auto fc_weights_index = graph->addOperand(Shape{OPERAND_ELEMS}, float_op);
+  auto fc_out_index = graph->addOperand(Shape{OPERAND_ELEMS}, float_op);
+
+  // Create AddNode
+  operation::AddNode::Param add_param{Activation::NONE};
+  auto add_op = std::unique_ptr<operation::AddNode>(
+      new operation::AddNode({add_lhs_index, add_rhs_index}, {add_out_index}, add_param));
+  graph->addOperation(std::move(add_op));
+
+  // Create MulNode
+  operation::MulNode::Param mul_param{Activation::NONE};
+  auto mul_op = std::unique_ptr<operation::MulNode>(
+      new operation::MulNode({mul_lhs_index, add_out_index}, {mul_out_index}, mul_param));
+  graph->addOperation(std::move(mul_op));
+
+  // Create FullyConnectedNode
+  operation::FullyConnectedNode::Param fc_param{Activation::NONE};
+  auto fc_op = std::unique_ptr<operation::FullyConnectedNode>(new operation::FullyConnectedNode(
+      {fc_weights_index, mul_out_index}, {fc_out_index}, fc_param));
+  graph->addOperation(std::move(fc_op));
+
+  graph->finishBuilding();
+  return graph;
+}
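+
+// For reference, the wiring createStraightGraph() produces (operand indices
+// assume addOperand() numbers operands sequentially from 0):
+//
+//   (0), (1) -> Add            -> (2)
+//   (3), (2) -> Mul            -> (4)
+//   (5), (4) -> FullyConnected -> (6)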
+
+// Set the same execution time for all given backends/operations
+void setOperationsExecutionTime(const std::vector<const Backend *> &backends,
+                                const std::vector<std::string> &op_names,
+                                const std::vector<uint32_t> &op_sizes, int64_t exec_time)
+{
+  assert(op_names.size() == op_sizes.size());
+  ExecTime et(backends);
+  for (size_t i = 0; i < op_names.size(); ++i)
+  {
+    for (auto &backend : backends)
+      et.updateOperationExecTime(backend, op_names[i], false, op_sizes[i], exec_time);
+  }
+  et.uploadOperationsExecTime();
+}
+
+// Set the same permutation time between every pair of distinct backends
+void setPermutationsExecutionTime(const std::vector<const Backend *> &backends,
+                                  const int operand_size, const int64_t exec_time)
+{
+  ExecTime et(backends);
+  for (const auto &backend : backends)
+  {
+    for (auto &other_backend : backends)
+    {
+      if (backend == other_backend)
+        continue;
+      et.updatePermuteTime(backend, other_backend, false, operand_size, exec_time);
+    }
+  }
+  et.uploadOperationsExecTime();
+}
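+
+// Both helpers persist the timings with uploadOperationsExecTime() so that a
+// subsequently constructed Scheduler can presumably read them back when it
+// estimates candidate schedules.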
+
+//
+// Tests setup/teardown
+//
+
+// SetUp/TearDown methods run before/after each test and perform actions common to all tests
+class SchedulerTest : public ::testing::Test
+{
+protected:
+  void SetUp() override
+  {
+    // Initialize mock backends
+    _cpu_backend = new MockBackendCPU();
+    _gpu_backend = new MockBackendGPU();
+    _npu_backend = new MockBackendNPU();
+    _mock_backends = {_cpu_backend, _gpu_backend, _npu_backend};
+
+    // Remember original value of 'EXECUTOR' environment variable
+    char *executor = std::getenv("EXECUTOR");
+    _original_executor = executor == nullptr ? "" : executor;
+  }
+
+  void TearDown() override
+  {
+    delete _cpu_backend;
+    delete _gpu_backend;
+    delete _npu_backend;
+    remove("exec_time.json");
+    setenv("EXECUTOR", _original_executor.c_str(), true);
+  }
+
+  const MockBackendCPU *_cpu_backend{nullptr};
+  const MockBackendGPU *_gpu_backend{nullptr};
+  const MockBackendNPU *_npu_backend{nullptr};
+  std::vector<const Backend *> _mock_backends;
+
+  std::string _original_executor;
+};
+
+class SchedulerTestWithStringParam : public SchedulerTest,
+                                     public testing::WithParamInterface<std::string>
+{
+};
+
+//
+// Scheduler tests
+//
+
+// Test scheduler behavior for a straight graph with known execution times of all nodes and
+// permutations. This test is parameterized with the executor name and runs three times - once for
+// each executor.
+TEST_P(SchedulerTestWithStringParam, straight_graph_known_exec_time)
+{
+  setExecutor(GetParam());
+
+  // Prepare graph
+  std::shared_ptr<graph::Graph> graph = createStraightGraph();
+  OperationIndex add_op_idx(0), mul_op_idx(1), fc_op_idx(2);
+  auto add_op_size = calcOpSize(*graph, add_op_idx);
+  auto mul_op_size = calcOpSize(*graph, mul_op_idx);
+  auto fc_op_size = calcOpSize(*graph, fc_op_idx);
+
+  // Test 1
+  // Expected behaviour: the scheduler plans each node on a different backend
+  {
+    // Prepare execution time
+    setPermutationsExecutionTime(_mock_backends, OPERAND_SIZE, 1);
+    setOperationsExecutionTime(_mock_backends, {"Add", "Mul", "FullyConnected"},
+                               {add_op_size, mul_op_size, fc_op_size}, 10000);
+    ExecTime et(_mock_backends);
+    et.updateOperationExecTime(_cpu_backend, "Add", false, add_op_size, 1);
+    et.updateOperationExecTime(_gpu_backend, "Mul", false, mul_op_size, 1);
+    et.updateOperationExecTime(_npu_backend, "FullyConnected", false, fc_op_size, 1);
+    et.uploadOperationsExecTime();
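+
+    // Rough cost sketch: each node costs 1 on its "own" backend and 10000
+    // anywhere else, while a permutation costs only 1, so spreading the three
+    // nodes across cpu/gpu/npu should be the cheapest schedule.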
+
+    // Test scheduler
+    auto scheduler = compiler::Scheduler(graph->operands(), _mock_backends);
+    const auto backend_resolver = scheduler.schedule(*graph);
+    ASSERT_EQ(backend_resolver->getBackend(add_op_idx)->config()->id(), "cpu");
+    ASSERT_EQ(backend_resolver->getBackend(mul_op_idx)->config()->id(), "gpu");
+    ASSERT_EQ(backend_resolver->getBackend(fc_op_idx)->config()->id(), "npu");
+  }
+
+  // Test 2
+  // Expected behaviour: the scheduler plans all nodes on a single backend because of the large
+  // transfer time
+  {
+    // Increase transfer time
+    setPermutationsExecutionTime(_mock_backends, OPERAND_SIZE, 100000);
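+
+    // Rough cost sketch: a permutation now costs 100000, which far outweighs
+    // the 10000 penalty of running Mul and FullyConnected on a non-preferred
+    // backend, so a single-backend schedule (cpu, where Add is cheapest)
+    // should win.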
+
+    // Test scheduler
+    auto scheduler = compiler::Scheduler(graph->operands(), _mock_backends);
+    const auto backend_resolver = scheduler.schedule(*graph);
+    ASSERT_EQ(backend_resolver->getBackend(add_op_idx)->config()->id(), "cpu");
+    ASSERT_EQ(backend_resolver->getBackend(mul_op_idx)->config()->id(), "cpu");
+    ASSERT_EQ(backend_resolver->getBackend(fc_op_idx)->config()->id(), "cpu");
+  }
+}
+INSTANTIATE_TEST_CASE_P(AllExecutors, SchedulerTestWithStringParam,
+                        testing::Values(LINEAR, DATAFLOW, PARALLEL));
+
+} // unnamed namespace