--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "graph/Graph.h"
+#include "model/Model.h"
+#include "compiler/Compiler.h"
+#include "model/operation/AddNode.h"
+
+namespace
+{
+
+using namespace neurun::model;
+using DataType = neurun::model::DataType;
+using Model = neurun::model::Model;
+
+// Builds a tiny two-Add model, compiles it, and exposes the resulting
+// graph and executor for the tests below.
+class CompiledMockUpModel
+{
+public:
+  // Model: two elementwise add operations
+  // model input: lhs, rhs1
+  // model output: second add result (result2)
+  // constant: rhs2
+  // result1 <= (lhs + rhs1)
+  // result2 <= (result1 + rhs2)
+  // lhs, rhs1, rhs2, result1, result2 shape: {1, 2, 2, 1}
+  // activation: none (constant)
+  CompiledMockUpModel()
+  {
+    std::unique_ptr<neurun::model::Model> model = nnfw::cpp14::make_unique<neurun::model::Model>();
+    // Operands shared by both add operations
+    Shape shape{1, 2, 2, 1};
+    TypeInfo type{DataType::FLOAT32};
+    // Backing storage for the rhs2 constant; static so it outlives this
+    // constructor, since the model only caches a pointer to it.
+    static float rhs2_data[4] = {3, 1, -1, 5};
+    auto operand_lhs = model->operands.emplace(shape, type);
+    auto operand_rhs1 = model->operands.emplace(shape, type);
+    auto operand_result1 = model->operands.emplace(shape, type);
+    auto operand_rhs2 = model->operands.emplace(shape, type);
+    auto operand_result2 = model->operands.emplace(shape, type);
+    model->operands.at(operand_rhs2)
+        .data(nnfw::cpp14::make_unique<CachedData>(reinterpret_cast<const uint8_t *>(&rhs2_data),
+                                                   16));
+    // 1st add operation (result1 <= lhs + rhs1)
+    operation::AddNode::Param param1;
+    param1.activation = neurun::model::Activation::NONE;
+    auto input_set1 = OperandIndexSequence{operand_lhs, operand_rhs1};
+    auto output_set1 = OperandIndexSequence{operand_result1};
+    model->operations.push(
+        nnfw::cpp14::make_unique<operation::AddNode>(input_set1, output_set1, param1));
+    // 2nd add operation (result2 <= result1 + rhs2)
+    operation::AddNode::Param param2;
+    param2.activation = neurun::model::Activation::NONE;
+    auto input_set2 = OperandIndexSequence{operand_result1, operand_rhs2};
+    auto output_set2 = OperandIndexSequence{operand_result2};
+    model->operations.push(
+        nnfw::cpp14::make_unique<operation::AddNode>(input_set2, output_set2, param2));
+    // Identify model inputs and outputs
+    model->inputs.append(operand_lhs);
+    model->inputs.append(operand_rhs1);
+    model->outputs.append(operand_result2);
+    graph = std::make_shared<::neurun::graph::Graph>(std::move(model));
+    graph->finishBuilding();
+
+    // Compile. Stack allocation instead of a (leaked) `new`: the original
+    // allocated the Compiler with `new` and never deleted it.
+    neurun::compiler::Compiler compiler{graph};
+    compiler.compile();
+    compiler.release(executor);
+  }
+
+public:
+  std::shared_ptr<::neurun::graph::Graph> graph;
+  std::shared_ptr<neurun::exec::IExecutor> executor;
+};
+
+// Single compiled executor, run once: checks (lhs + rhs1) + rhs2 elementwise.
+TEST(ExecInstance, simple)
+{
+  auto mockup = CompiledMockUpModel();
+  auto graph = mockup.graph;
+  auto executor = mockup.executor;
+
+  const auto input1 = IOIndex{0};
+  const auto input2 = IOIndex{1};
+  const auto output = IOIndex{0};
+
+  const float lhs_data[4] = {1, 0, -1, -2};
+  const float rhs1_data[4] = {1, -3, 2, -4};
+  float result_data[4] = {};
+  // Expected: (lhs + rhs1) + rhs2 where rhs2 = {3, 1, -1, 5}
+  const float expected_data[4] = {5, -2, 0, -1};
+
+  executor->setInput(input1, reinterpret_cast<const void *>(lhs_data), 16);
+  executor->setInput(input2, reinterpret_cast<const void *>(rhs1_data), 16);
+  executor->setOutput(output, reinterpret_cast<void *>(result_data), 16);
+  executor->execute();
+
+  for (int idx = 0; idx < 4; ++idx)
+  {
+    EXPECT_EQ(result_data[idx], expected_data[idx]);
+  }
+}
+
+// Compiles the same graph twice and runs both executors with different
+// inputs, verifying the two executors are independent.
+TEST(ExecInstance, twoCompile)
+{
+  auto mockup = CompiledMockUpModel();
+  auto graph = mockup.graph;
+  auto executor1 = mockup.executor;
+
+  auto input1 = IOIndex{0};
+  auto input2 = IOIndex{1};
+  auto output = IOIndex{0};
+
+  const float exe1_input1_buffer[4] = {1, 0, -1, -2};
+  const float exe1_input2_buffer[4] = {1, -3, 2, -4};
+  float exe1_output_buffer[4] = {};
+  const float exe1_output_expected[4] = {5, -2, 0, -1};
+
+  executor1->setInput(input1, reinterpret_cast<const void *>(exe1_input1_buffer), 16);
+  executor1->setInput(input2, reinterpret_cast<const void *>(exe1_input2_buffer), 16);
+  executor1->setOutput(output, reinterpret_cast<void *>(exe1_output_buffer), 16);
+
+  // Make new executor: compile again. Stack allocation instead of a
+  // (leaked) `new`-ed Compiler.
+  neurun::compiler::Compiler compiler{graph};
+  compiler.compile();
+  std::shared_ptr<neurun::exec::IExecutor> executor2;
+  compiler.release(executor2);
+
+  const float exe2_input1_buffer[4] = {2, 1, -2, 0};
+  const float exe2_input2_buffer[4] = {-3, 3, 1, 2};
+  float exe2_output_buffer[4] = {};
+  const float exe2_output_expected[4] = {2, 5, -2, 7};
+
+  executor2->setInput(input1, reinterpret_cast<const void *>(exe2_input1_buffer), 16);
+  executor2->setInput(input2, reinterpret_cast<const void *>(exe2_input2_buffer), 16);
+  executor2->setOutput(output, reinterpret_cast<void *>(exe2_output_buffer), 16);
+
+  executor1->execute();
+  for (auto i = 0; i < 4; i++)
+  {
+    // Bug fix: original compared exe1_output_expected with itself,
+    // so executor1's result was never actually checked.
+    EXPECT_EQ(exe1_output_buffer[i], exe1_output_expected[i]);
+  }
+
+  executor2->execute();
+  for (auto i = 0; i < 4; i++)
+  {
+    EXPECT_EQ(exe2_output_buffer[i], exe2_output_expected[i]);
+  }
+}
+
+// TODO 1: Support two initialized execution instance then ordered execution
+// Need to introduce execution instance
+//
+// auto exec_instance1 = new neurun::exec::ExecInstance{executor};
+// auto exec_instance2 = new neurun::exec::ExecInstance{executor};
+// exec_instance1->setInput(...);
+// exec_instance1->setOutput(...);
+// exec_instance2->setInput(...);
+// exec_instance2->setOutput(...);
+// exec_instance1->execute();
+// ... use output of exec_instance1
+// exec_instance2->execute();
+// ... use output of exec_instance2
+
+// TODO 2: Support multi-thread execution
+// If executor(backend) can support, these instances run independently
+// Otherwise, execute() should wait for the other execution to end (need lock/unlock mutex)
+//
+// ... in thread1
+// auto exec_instance = new neurun::exec::ExecInstance{executor};
+// exec_instance->setInput(...);
+// exec_instance->setOutput(...);
+// exec_instance->execute();
+//
+// .. in thread2
+// auto exec_instance = new neurun::exec::ExecInstance{executor};
+// exec_instance->setInput(...);
+// exec_instance->setOutput(...);
+// exec_instance->execute();
+
+// TODO 3: Support asynchronous execution
+// If executor(backend) can support, execute() makes new thread and return
+// and support wait function
+// Otherwise, instance run until execution end and return (wait function do nothing)
+//
+// auto exec_instance1 = new neurun::exec::ExecInstance{executor};
+// exec_instance1->setInput(...);
+// exec_instance1->setOutput(...);
+// exec_instance1->execute();
+// auto exec_instance2 = new neurun::exec::ExecInstance{executor};
+// exec_instance2->setInput(...);
+// exec_instance2->setOutput(...);
+// exec_instance2->execute();
+// exec_instance1->wait();
+// ... use output of exec_instance1
+// exec_instance1->setInput(...);
+// exec_instance1->setOutput(...);
+// exec_instance2->wait();
+// ... use output of exec_instance2
+// exec_instance2->setInput(...);
+// exec_instance2->setOutput(...);
+// exec_instance1->wait();
+// ... use output of exec_instance1
+// exec_instance2->wait();
+// ... use output of exec_instance2
+
+} // namespace