/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <gtest/gtest.h>
#include <thread>

#include "compiler/Compiler.h"
#include "exec/Execution.h"
#include "ir/Graph.h"
#include "ir/operation/BinaryArithmetic.h"

using namespace onert::ir;
30 class CompiledMockUpModel
35 // Model: two elementwise add operation
36 // model input: lhs, rhs1
37 // model output: second add result (result2)
39 // result1 <= (lhs + rhs)
40 // result2 <= (result1 + rhs2)
41 // lhs, rhs1, rh2, result1, result2 shape: {1, 2, 2, 1}
42 // activation: none (constant)
43 graph = std::make_shared<Graph>();
44 // 1st add operands (result1 <= lhs + rhs1)
45 Shape shape{1, 2, 2, 1};
46 TypeInfo type{DataType::FLOAT32};
47 static float rhs2_data[4] = {3, 1, -1, 5};
48 auto operand_lhs = graph->addOperand(shape, type);
49 auto operand_rhs1 = graph->addOperand(shape, type);
50 auto operand_result1 = graph->addOperand(shape, type);
51 auto operand_rhs2 = graph->addOperand(shape, type);
52 auto operand_result2 = graph->addOperand(shape, type);
55 .data(std::make_unique<CachedData>(reinterpret_cast<const uint8_t *>(&rhs2_data), 16));
56 // 2nd add operations (result2 <= result1 + rhs2)
57 operation::BinaryArithmetic::Param param1;
58 param1.arithmetic_type = operation::BinaryArithmetic::ArithmeticType::ADD;
59 param1.activation = Activation::NONE;
60 auto input_set1 = OperandIndexSequence{operand_lhs, operand_rhs1};
61 auto output_set1 = OperandIndexSequence{operand_result1};
63 std::make_unique<operation::BinaryArithmetic>(input_set1, output_set1, param1));
64 operation::BinaryArithmetic::Param param2;
65 param2.arithmetic_type = operation::BinaryArithmetic::ArithmeticType::ADD;
66 param2.activation = Activation::NONE;
67 auto input_set2 = OperandIndexSequence{operand_result1, operand_rhs2};
68 auto output_set2 = OperandIndexSequence{operand_result2};
70 std::make_unique<operation::BinaryArithmetic>(input_set2, output_set2, param2));
71 // Identify model inputs and outputs
72 graph->addInput(operand_lhs);
73 graph->addInput(operand_rhs1);
74 graph->addOutput(operand_result2);
75 graph->finishBuilding();
78 auto subgs = std::make_shared<onert::ir::Subgraphs>();
79 subgs->push(onert::ir::SubgraphIndex{0}, graph);
80 onert::compiler::Compiler compiler{subgs};
81 executors = compiler.compile();
85 std::shared_ptr<Graph> graph;
86 std::shared_ptr<onert::exec::ExecutorMap> executors;
89 TEST(ExecInstance, simple)
91 auto mockup = CompiledMockUpModel();
92 auto graph = mockup.graph;
93 auto executors = mockup.executors;
95 auto input1 = IOIndex{0};
96 auto input2 = IOIndex{1};
97 auto output = IOIndex{0};
99 const float input1_buffer[4] = {1, 0, -1, -2};
100 const float input2_buffer[4] = {1, -3, 2, -4};
101 float output_buffer[4] = {};
102 const float output_expected[4] = {5, -2, 0, -1};
104 onert::exec::Execution execution{executors};
106 execution.setInput(input1, reinterpret_cast<const void *>(input1_buffer), 16);
107 execution.setInput(input2, reinterpret_cast<const void *>(input2_buffer), 16);
108 execution.setOutput(output, reinterpret_cast<void *>(output_buffer), 16);
111 for (auto i = 0; i < 4; i++)
113 EXPECT_EQ(output_buffer[i], output_expected[i]);
117 TEST(ExecInstance, twoCompile)
119 auto mockup = CompiledMockUpModel();
120 auto graph = mockup.graph;
121 auto executors1 = mockup.executors;
122 onert::exec::Execution execution1{executors1};
124 auto input1 = IOIndex{0};
125 auto input2 = IOIndex{1};
126 auto output = IOIndex{0};
128 const float exe1_input1_buffer[4] = {1, 0, -1, -2};
129 const float exe1_input2_buffer[4] = {1, -3, 2, -4};
130 float exe1_output_buffer[4] = {};
131 const float exe1_output_expected[4] = {5, -2, 0, -1};
133 execution1.setInput(input1, reinterpret_cast<const void *>(exe1_input1_buffer), 16);
134 execution1.setInput(input2, reinterpret_cast<const void *>(exe1_input2_buffer), 16);
135 execution1.setOutput(output, reinterpret_cast<void *>(exe1_output_buffer), 16);
137 // Make new executor: compile again
138 auto subgs = std::make_shared<onert::ir::Subgraphs>();
139 subgs->push(onert::ir::SubgraphIndex{0}, graph);
140 onert::compiler::Compiler compiler{subgs};
141 std::shared_ptr<onert::exec::ExecutorMap> executors2 = compiler.compile();
142 onert::exec::Execution execution2{executors2};
144 const float exe2_input1_buffer[4] = {2, 1, -2, 0};
145 const float exe2_input2_buffer[4] = {-3, 3, 1, 2};
146 float exe2_output_buffer[4] = {};
147 const float exe2_output_expected[4] = {2, 5, -2, 7};
149 execution2.setInput(input1, reinterpret_cast<const void *>(exe2_input1_buffer), 16);
150 execution2.setInput(input2, reinterpret_cast<const void *>(exe2_input2_buffer), 16);
151 execution2.setOutput(output, reinterpret_cast<void *>(exe2_output_buffer), 16);
153 execution1.execute();
154 execution2.execute();
156 for (auto i = 0; i < 4; i++)
158 EXPECT_EQ(exe1_output_buffer[i], exe1_output_expected[i]);
159 EXPECT_EQ(exe2_output_buffer[i], exe2_output_expected[i]);
163 // Support two initialized execution instance then ordered execution
164 TEST(ExecInstance, twoExecution)
166 auto mockup = CompiledMockUpModel();
167 auto executors = mockup.executors;
168 auto input1 = IOIndex{0};
169 auto input2 = IOIndex{1};
170 auto output1 = IOIndex{0};
172 const float exe1_input1_buffer[4] = {1, 0, -1, -2};
173 const float exe1_input2_buffer[4] = {1, -3, 2, -4};
174 float exe1_output_buffer[4] = {};
175 const float exe1_output_expected[4] = {5, -2, 0, -1};
176 const float exe2_output_expected[4] = {2, 5, -2, 7};
178 onert::exec::Execution execution1{executors};
179 execution1.setInput(input1, reinterpret_cast<const void *>(exe1_input1_buffer), 16);
180 execution1.setInput(input2, reinterpret_cast<const void *>(exe1_input2_buffer), 16);
181 execution1.setOutput(output1, reinterpret_cast<void *>(exe1_output_buffer), 16);
183 const float exe2_input1_buffer[4] = {2, 1, -2, 0};
184 const float exe2_input2_buffer[4] = {-3, 3, 1, 2};
185 float exe2_output_buffer[4] = {};
187 // Make new execution
188 onert::exec::Execution execution2{executors};
189 execution2.setInput(input1, reinterpret_cast<const void *>(exe2_input1_buffer), 16);
190 execution2.setInput(input2, reinterpret_cast<const void *>(exe2_input2_buffer), 16);
191 execution2.setOutput(output1, reinterpret_cast<void *>(exe2_output_buffer), 16);
193 execution1.execute();
194 execution2.execute();
196 for (auto i = 0; i < 4; i++)
198 EXPECT_EQ(exe1_output_buffer[i], exe1_output_expected[i]);
199 EXPECT_EQ(exe2_output_buffer[i], exe2_output_expected[i]);
206 Inference(const float (&input1)[4], const float (&input2)[4], float (&output)[4],
207 std::shared_ptr<onert::exec::ExecutorMap> &executors)
208 : _input1{input1}, _input2{input2}, _output{output}, _executors{executors}
215 auto input1 = IOIndex{0};
216 auto input2 = IOIndex{1};
217 auto output1 = IOIndex{0};
219 onert::exec::Execution execution{_executors};
220 execution.setInput(input1, reinterpret_cast<const void *>(_input1), 16);
221 execution.setInput(input2, reinterpret_cast<const void *>(_input2), 16);
222 execution.setOutput(output1, reinterpret_cast<void *>(_output), 16);
228 const float (&_input1)[4];
229 const float (&_input2)[4];
231 std::shared_ptr<onert::exec::ExecutorMap> &_executors;
234 // Support multi-thread execution
235 TEST(ExecInstance, twoThreads)
237 auto mockup = CompiledMockUpModel();
238 auto executors = mockup.executors;
240 const float exe1_input1_buffer[4] = {1, 0, -1, -2};
241 const float exe1_input2_buffer[4] = {1, -3, 2, -4};
242 float exe1_output_buffer[4] = {};
243 const float exe1_output_expected[4] = {5, -2, 0, -1};
245 Inference execution1{exe1_input1_buffer, exe1_input2_buffer, exe1_output_buffer, executors};
247 const float exe2_input1_buffer[4] = {2, 1, -2, 0};
248 const float exe2_input2_buffer[4] = {-3, 3, 1, 2};
249 float exe2_output_buffer[4] = {};
250 const float exe2_output_expected[4] = {2, 5, -2, 7};
252 Inference execution2{exe2_input1_buffer, exe2_input2_buffer, exe2_output_buffer, executors};
254 std::thread t1{&Inference::inference, &execution1};
255 std::thread t2{&Inference::inference, &execution2};
260 for (auto i = 0; i < 4; i++)
262 EXPECT_EQ(exe1_output_buffer[i], exe1_output_expected[i]);
263 EXPECT_EQ(exe2_output_buffer[i], exe2_output_expected[i]);
267 // Support asynchronous execution
268 TEST(ExecInstance, async)
270 auto mockup = CompiledMockUpModel();
271 auto graph = mockup.graph;
272 auto executors = mockup.executors;
274 auto input1 = IOIndex{0};
275 auto input2 = IOIndex{1};
276 auto output = IOIndex{0};
278 const float input1_buffer[4] = {1, 0, -1, -2};
279 const float input2_buffer[4] = {1, -3, 2, -4};
280 float output_buffer[4] = {};
281 const float output_expected[4] = {5, -2, 0, -1};
283 onert::exec::Execution execution{executors};
285 execution.setInput(input1, reinterpret_cast<const void *>(input1_buffer), 16);
286 execution.setInput(input2, reinterpret_cast<const void *>(input2_buffer), 16);
287 execution.setOutput(output, reinterpret_cast<void *>(output_buffer), 16);
288 execution.startExecute();
289 execution.waitFinish();
291 for (auto i = 0; i < 4; i++)
293 EXPECT_EQ(output_buffer[i], output_expected[i]);