From 9b7dc2f2ab9bd4ef3b8b9a0fea867876a26c3573 Mon Sep 17 00:00:00 2001
From: Hyeongseok Oh/On-Device Lab(SR)/Staff Engineer/Samsung Electronics
Date: Wed, 17 Jul 2019 13:56:29 +0900
Subject: [PATCH] Add gtest to prepare multiple execution instance run (#5568)

* Test to prepare multiple execution instance run

Introduce gtest to prepare running multiple execution instances using one executor
Move interpreter gtest into exec to match the core directory structure

Signed-off-by: Hyeongseok Oh

* Use expected value array
---
 runtimes/neurun/test/core/exec/ExecInstance.cc     | 217 +++++++++++++++++++++
 .../test/{ => core/exec}/interp/ExecManager.cc     |   0
 2 files changed, 217 insertions(+)
 create mode 100644 runtimes/neurun/test/core/exec/ExecInstance.cc
 rename runtimes/neurun/test/{ => core/exec}/interp/ExecManager.cc (100%)

diff --git a/runtimes/neurun/test/core/exec/ExecInstance.cc b/runtimes/neurun/test/core/exec/ExecInstance.cc
new file mode 100644
index 0000000..21627a8
--- /dev/null
+++ b/runtimes/neurun/test/core/exec/ExecInstance.cc
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "graph/Graph.h"
+#include "model/Model.h"
+#include "compiler/Compiler.h"
+#include "model/operation/AddNode.h"
+
+namespace
+{
+
+using namespace neurun::model;
+using DataType = neurun::model::DataType;
+using Model = neurun::model::Model;
+
+class CompiledMockUpModel
+{
+public:
+  CompiledMockUpModel()
+  {
+    // Model: two elementwise add operations
+    // model input: lhs, rhs1
+    // model output: second add result (result2)
+    // constant: rhs2
+    // result1 <= (lhs + rhs1)
+    // result2 <= (result1 + rhs2)
+    // lhs, rhs1, rhs2, result1, result2 shape: {1, 2, 2, 1}
+    // activation: none (constant)
+    std::unique_ptr<Model> model = nnfw::cpp14::make_unique<Model>();
+    // Operands for both add operations
+    Shape shape{1, 2, 2, 1};
+    TypeInfo type{DataType::FLOAT32};
+    static float rhs2_data[4] = {3, 1, -1, 5};
+    auto operand_lhs = model->operands.emplace(shape, type);
+    auto operand_rhs1 = model->operands.emplace(shape, type);
+    auto operand_result1 = model->operands.emplace(shape, type);
+    auto operand_rhs2 = model->operands.emplace(shape, type);
+    auto operand_result2 = model->operands.emplace(shape, type);
+    model->operands.at(operand_rhs2)
+        .data(nnfw::cpp14::make_unique<CachedData>(reinterpret_cast<const uint8_t *>(&rhs2_data),
+                                                   16));
+    // 1st add operation (result1 <= lhs + rhs1)
+    operation::AddNode::Param param1;
+    param1.activation = neurun::model::Activation::NONE;
+    auto input_set1 = OperandIndexSequence{operand_lhs, operand_rhs1};
+    auto output_set1 = OperandIndexSequence{operand_result1};
+    model->operations.push(
+        nnfw::cpp14::make_unique<operation::AddNode>(input_set1, output_set1, param1));
+    // 2nd add operation (result2 <= result1 + rhs2)
+    operation::AddNode::Param param2;
+    param2.activation = neurun::model::Activation::NONE;
+    auto input_set2 = OperandIndexSequence{operand_result1, operand_rhs2};
+    auto output_set2 = OperandIndexSequence{operand_result2};
+    model->operations.push(
+        nnfw::cpp14::make_unique<operation::AddNode>(input_set2, output_set2, param2));
+    // Identify model inputs and outputs
+    model->inputs.append(operand_lhs);
+    model->inputs.append(operand_rhs1);
+    model->outputs.append(operand_result2);
+    graph = std::make_shared<::neurun::graph::Graph>(std::move(model));
+    graph->finishBuilding();
+
+    // Compile
+    auto compiler = new neurun::compiler::Compiler{graph};
+    compiler->compile();
+    compiler->release(executor);
+    delete compiler;
+  }
+
+public:
+  std::shared_ptr<::neurun::graph::Graph> graph;
+  std::shared_ptr<::neurun::exec::IExecutor> executor;
+};
+
+TEST(ExecInstance, simple)
+{
+  auto mockup = CompiledMockUpModel();
+  auto graph = mockup.graph;
+  auto executor = mockup.executor;
+
+  auto input1 = IOIndex{0};
+  auto input2 = IOIndex{1};
+  auto output = IOIndex{0};
+
+  const float input1_buffer[4] = {1, 0, -1, -2};
+  const float input2_buffer[4] = {1, -3, 2, -4};
+  float output_buffer[4] = {};
+  const float output_expected[4] = {5, -2, 0, -1};
+
+  executor->setInput(input1, reinterpret_cast<const void *>(input1_buffer), 16);
+  executor->setInput(input2, reinterpret_cast<const void *>(input2_buffer), 16);
+  executor->setOutput(output, reinterpret_cast<void *>(output_buffer), 16);
+  executor->execute();
+
+  for (auto i = 0; i < 4; i++)
+  {
+    EXPECT_EQ(output_buffer[i], output_expected[i]);
+  }
+}
+
+TEST(ExecInstance, twoCompile)
+{
+  auto mockup = CompiledMockUpModel();
+  auto graph = mockup.graph;
+  auto executor1 = mockup.executor;
+
+  auto input1 = IOIndex{0};
+  auto input2 = IOIndex{1};
+  auto output = IOIndex{0};
+
+  const float exe1_input1_buffer[4] = {1, 0, -1, -2};
+  const float exe1_input2_buffer[4] = {1, -3, 2, -4};
+  float exe1_output_buffer[4] = {};
+  const float exe1_output_expected[4] = {5, -2, 0, -1};
+
+  executor1->setInput(input1, reinterpret_cast<const void *>(exe1_input1_buffer), 16);
+  executor1->setInput(input2, reinterpret_cast<const void *>(exe1_input2_buffer), 16);
+  executor1->setOutput(output, reinterpret_cast<void *>(exe1_output_buffer), 16);
+
+  // Make new executor: compile again
+  auto compiler = new neurun::compiler::Compiler{graph};
+  compiler->compile();
+  std::shared_ptr<::neurun::exec::IExecutor> executor2;
+  compiler->release(executor2);
+  delete compiler;
+
+  const float exe2_input1_buffer[4] = {2, 1, -2, 0};
+  const float exe2_input2_buffer[4] = {-3, 3, 1, 2};
+  float exe2_output_buffer[4] = {};
+  const float exe2_output_expected[4] = {2, 5, -2, 7};
+
+  executor2->setInput(input1, reinterpret_cast<const void *>(exe2_input1_buffer), 16);
+  executor2->setInput(input2, reinterpret_cast<const void *>(exe2_input2_buffer), 16);
+  executor2->setOutput(output, reinterpret_cast<void *>(exe2_output_buffer), 16);
+
+  executor1->execute();
+  for (auto i = 0; i < 4; i++)
+  {
+    EXPECT_EQ(exe1_output_buffer[i], exe1_output_expected[i]);
+  }
+
+  executor2->execute();
+  for (auto i = 0; i < 4; i++)
+  {
+    EXPECT_EQ(exe2_output_buffer[i], exe2_output_expected[i]);
+  }
+}
+
+// TODO 1: Support two initialized execution instances, then ordered execution
+//         Needs an execution instance class to be introduced
+//
+// auto exec_instance1 = new neurun::exec::ExecInstance{executor};
+// auto exec_instance2 = new neurun::exec::ExecInstance{executor};
+// exec_instance1->setInput(...);
+// exec_instance1->setOutput(...);
+// exec_instance2->setInput(...);
+// exec_instance2->setOutput(...);
+// exec_instance1->execute();
+// ... use output of exec_instance1
+// exec_instance2->execute();
+// ... use output of exec_instance2
+
+// TODO 2: Support multi-thread execution
+//         If the executor (backend) supports it, these instances run independently
+//         Otherwise, execute() should wait for the other execution to end (needs mutex lock/unlock)
+//
+// ... in thread1
+// auto exec_instance = new neurun::exec::ExecInstance{executor};
+// exec_instance->setInput(...);
+// exec_instance->setOutput(...);
+// exec_instance->execute();
+//
+// ... in thread2
+// auto exec_instance = new neurun::exec::ExecInstance{executor};
+// exec_instance->setInput(...);
+// exec_instance->setOutput(...);
+// exec_instance->execute();

+// TODO 3: Support asynchronous execution
+//         If the executor (backend) supports it, execute() spawns a new thread and returns,
+//         and a wait function is provided
+//         Otherwise, the instance runs until execution ends and then returns (wait() does nothing)
+//
+// auto exec_instance1 = new neurun::exec::ExecInstance{executor};
+// exec_instance1->setInput(...);
+// exec_instance1->setOutput(...);
+// exec_instance1->execute();
+// auto exec_instance2 = new neurun::exec::ExecInstance{executor};
+// exec_instance2->setInput(...);
+// exec_instance2->setOutput(...);
+// exec_instance2->execute();
+// exec_instance1->wait();
+// ... use output of exec_instance1
+// exec_instance1->setInput(...);
+// exec_instance1->setOutput(...);
+// exec_instance1->execute();
+// exec_instance2->wait();
+// ... use output of exec_instance2
+// exec_instance2->setInput(...);
+// exec_instance2->setOutput(...);
+// exec_instance2->execute();
+// exec_instance1->wait();
+// ... use output of exec_instance1
+// exec_instance2->wait();
+// ... use output of exec_instance2
+
+} // namespace
diff --git a/runtimes/neurun/test/interp/ExecManager.cc b/runtimes/neurun/test/core/exec/interp/ExecManager.cc
similarity index 100%
rename from runtimes/neurun/test/interp/ExecManager.cc
rename to runtimes/neurun/test/core/exec/interp/ExecManager.cc
-- 
2.7.4
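
The TODO 1 pseudocode above can be made concrete. Below is a minimal sketch, assuming a planned but not-yet-existing neurun::exec::ExecInstance class whose constructor and setInput/setOutput/execute mirror the current executor API as implied by the TODO comments; the class, the test name, and the DISABLED_ prefix are hypothetical. The buffers and expected values are reused from TEST(ExecInstance, twoCompile), and the sketch is meant to live in the same anonymous namespace as the tests above.

// Hypothetical: neurun::exec::ExecInstance does not exist yet; this only follows
// the API shape sketched in the TODO 1 comment.
TEST(ExecInstance, DISABLED_twoExecInstances)
{
  auto mockup = CompiledMockUpModel();
  auto executor = mockup.executor;

  auto input1 = IOIndex{0};
  auto input2 = IOIndex{1};
  auto output = IOIndex{0};

  const float exe1_input1_buffer[4] = {1, 0, -1, -2};
  const float exe1_input2_buffer[4] = {1, -3, 2, -4};
  float exe1_output_buffer[4] = {};
  const float exe1_output_expected[4] = {5, -2, 0, -1};

  const float exe2_input1_buffer[4] = {2, 1, -2, 0};
  const float exe2_input2_buffer[4] = {-3, 3, 1, 2};
  float exe2_output_buffer[4] = {};
  const float exe2_output_expected[4] = {2, 5, -2, 7};

  // Both instances share the one compiled executor
  auto exec_instance1 = new neurun::exec::ExecInstance{executor};
  exec_instance1->setInput(input1, reinterpret_cast<const void *>(exe1_input1_buffer), 16);
  exec_instance1->setInput(input2, reinterpret_cast<const void *>(exe1_input2_buffer), 16);
  exec_instance1->setOutput(output, reinterpret_cast<void *>(exe1_output_buffer), 16);

  auto exec_instance2 = new neurun::exec::ExecInstance{executor};
  exec_instance2->setInput(input1, reinterpret_cast<const void *>(exe2_input1_buffer), 16);
  exec_instance2->setInput(input2, reinterpret_cast<const void *>(exe2_input2_buffer), 16);
  exec_instance2->setOutput(output, reinterpret_cast<void *>(exe2_output_buffer), 16);

  // Ordered execution: use the output of instance 1, then of instance 2
  exec_instance1->execute();
  for (auto i = 0; i < 4; i++)
  {
    EXPECT_EQ(exe1_output_buffer[i], exe1_output_expected[i]);
  }
  exec_instance2->execute();
  for (auto i = 0; i < 4; i++)
  {
    EXPECT_EQ(exe2_output_buffer[i], exe2_output_expected[i]);
  }

  delete exec_instance1;
  delete exec_instance2;
}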
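
For TODO 2, a thread-per-instance sketch under the same assumptions, plus the standard thread header. As the TODO notes, whether the two executions actually overlap depends on the backend; the helper function below is hypothetical.

#include <thread>

// Hypothetical helper: each thread owns one ExecInstance built on the shared executor
void runOneInstance(std::shared_ptr<neurun::exec::IExecutor> executor, const float *input1_buffer,
                    const float *input2_buffer, float *output_buffer)
{
  auto exec_instance = new neurun::exec::ExecInstance{executor};
  exec_instance->setInput(IOIndex{0}, reinterpret_cast<const void *>(input1_buffer), 16);
  exec_instance->setInput(IOIndex{1}, reinterpret_cast<const void *>(input2_buffer), 16);
  exec_instance->setOutput(IOIndex{0}, reinterpret_cast<void *>(output_buffer), 16);
  exec_instance->execute();
  delete exec_instance;
}

TEST(ExecInstance, DISABLED_twoThreads)
{
  auto mockup = CompiledMockUpModel();
  auto executor = mockup.executor;

  const float exe1_input1_buffer[4] = {1, 0, -1, -2};
  const float exe1_input2_buffer[4] = {1, -3, 2, -4};
  float exe1_output_buffer[4] = {};
  const float exe1_output_expected[4] = {5, -2, 0, -1};

  const float exe2_input1_buffer[4] = {2, 1, -2, 0};
  const float exe2_input2_buffer[4] = {-3, 3, 1, 2};
  float exe2_output_buffer[4] = {};
  const float exe2_output_expected[4] = {2, 5, -2, 7};

  std::thread t1{runOneInstance, executor, exe1_input1_buffer, exe1_input2_buffer,
                 exe1_output_buffer};
  std::thread t2{runOneInstance, executor, exe2_input1_buffer, exe2_input2_buffer,
                 exe2_output_buffer};
  t1.join();
  t2.join();

  for (auto i = 0; i < 4; i++)
  {
    EXPECT_EQ(exe1_output_buffer[i], exe1_output_expected[i]);
    EXPECT_EQ(exe2_output_buffer[i], exe2_output_expected[i]);
  }
}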
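
For TODO 3, a sketch of the execute()/wait() pattern, again assuming the hypothetical ExecInstance and the wait function described in the TODO. For a backend without asynchronous support, execute() would block and wait() would do nothing, so the check still holds. Inputs and expected values are those of TEST(ExecInstance, simple).

// Hypothetical: assumes the asynchronous execute()/wait() behaviour described in TODO 3
TEST(ExecInstance, DISABLED_async)
{
  auto mockup = CompiledMockUpModel();
  auto executor = mockup.executor;

  auto input1 = IOIndex{0};
  auto input2 = IOIndex{1};
  auto output = IOIndex{0};

  const float input1_buffer[4] = {1, 0, -1, -2};
  const float input2_buffer[4] = {1, -3, 2, -4};
  float output_buffer[4] = {};
  const float output_expected[4] = {5, -2, 0, -1};

  auto exec_instance = new neurun::exec::ExecInstance{executor};
  exec_instance->setInput(input1, reinterpret_cast<const void *>(input1_buffer), 16);
  exec_instance->setInput(input2, reinterpret_cast<const void *>(input2_buffer), 16);
  exec_instance->setOutput(output, reinterpret_cast<void *>(output_buffer), 16);

  exec_instance->execute(); // may return before the run finishes
  exec_instance->wait();    // blocks until the output buffer is ready

  for (auto i = 0; i < 4; i++)
  {
    EXPECT_EQ(output_buffer[i], output_expected[i]);
  }

  delete exec_instance;
}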