/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #include "kernels/Add.h"
19 #include "kernels/TestUtils.h"
20 #include "luci_interpreter/TestMemoryManager.h"
22 namespace luci_interpreter
29 using namespace testing;
31 class AddTest : public ::testing::Test
34 void SetUp() override { _memory_manager = std::make_unique<TestMemoryManager>(); }
36 std::unique_ptr<IMemoryManager> _memory_manager;
// For quantized Add, the error shouldn't exceed one quantization step:
// the [min, max] range is mapped onto 256 uint8 levels, i.e. 255 steps.
float GetTolerance(float min, float max)
{
  // Use a float literal to avoid a silent double->float narrowing conversion.
  const float kQuantizedStep = (max - min) / 255.0f;
  return kQuantizedStep;
}
46 TEST_F(AddTest, Uint8)
48 std::initializer_list<int32_t> base_shape = {2, 3, 1, 2};
49 std::initializer_list<float> base_data = {-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f,
50 1.2f, 2.8f, -1.6f, 0.0f, 0.7f, -2.2f};
51 std::initializer_list<int32_t> test_shapes[] = {
52 {1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
53 std::initializer_list<float> test_data = {0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f};
54 std::initializer_list<int32_t> output_shapes[] = {
55 {2, 3, 3, 2}, {2, 3, 1, 2}, {2, 3, 3, 2}, {2, 3, 1, 2}};
56 std::vector<std::vector<float>> output_data = {
57 {-0.1f, 2.6f, -0.7f, 2.8f, 0.7f, 3.0f, 1.1f, 0.8f, 0.5f, 1.0f, 1.9f, 1.4f,
58 1.0f, -0.8f, 0.4f, -0.6f, 1.8f, -0.2f, 1.4f, 3.0f, 0.8f, 3.0f, 2.2f, 3.0f,
59 -1.4f, 0.3f, -2.0f, 0.5f, -0.6f, 0.9f, 0.9f, -1.9f, 0.3f, -1.7f, 1.7f, -1.3f},
60 {-0.1f, 2.6f, 0.5f, 1.0f, 1.8f, -0.2f, 1.4f, 3.0f, -2.0f, 0.5f, 1.7f, -1.3f},
61 {-0.1f, 2.5f, 0.0f, 2.6f, -0.7f, 1.9f, 1.1f, 0.7f, 1.2f, 0.8f, 0.5f, 0.1f,
62 1.0f, -0.9f, 1.1f, -0.8f, 0.4f, -1.5f, 1.7f, 3.0f, 2.2f, 3.0f, 2.1f, 3.0f,
63 -1.1f, 0.5f, -0.6f, 1.0f, -0.7f, 0.9f, 1.2f, -1.7f, 1.7f, -1.2f, 1.6f, -1.3f},
64 {-0.1f, 2.5f, 1.2f, 0.8f, 0.4f, -1.5f, 1.7f, 3.0f, -0.6f, 1.0f, 1.6f, -1.3f}};
65 float kQuantizedTolerance = GetTolerance(-3.f, 3.f);
66 std::pair<float, int32_t> quant_param = quantizationParams<uint8_t>(-3.f, 3.f);
67 for (int i = 0; i < output_data.size(); i++)
69 Tensor input1_tensor = makeInputTensor<DataType::U8>(
70 base_shape, quant_param.first, quant_param.second, base_data, _memory_manager.get());
71 Tensor input2_tensor = makeInputTensor<DataType::U8>(
72 test_shapes[i], quant_param.first, quant_param.second, test_data, _memory_manager.get());
73 Tensor output_tensor =
74 makeOutputTensor(getElementType<uint8_t>(), quant_param.first, quant_param.second);
77 params.activation = Activation::NONE;
79 Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
81 _memory_manager->allocate_memory(output_tensor);
84 EXPECT_THAT(dequantizeTensorData(output_tensor),
85 FloatArrayNear(output_data[i], kQuantizedTolerance));
86 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shapes[i]));
88 // Re-run with exchanged inputs.
89 for (int i = 0; i < output_data.size(); i++)
91 Tensor input1_tensor = makeInputTensor<DataType::U8>(
92 test_shapes[i], quant_param.first, quant_param.second, test_data, _memory_manager.get());
93 Tensor input2_tensor = makeInputTensor<DataType::U8>(
94 base_shape, quant_param.first, quant_param.second, base_data, _memory_manager.get());
95 Tensor output_tensor =
96 makeOutputTensor(getElementType<uint8_t>(), quant_param.first, quant_param.second);
99 params.activation = Activation::NONE;
101 Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
103 _memory_manager->allocate_memory(output_tensor);
106 EXPECT_THAT(dequantizeTensorData(output_tensor),
107 FloatArrayNear(output_data[i], kQuantizedTolerance));
108 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shapes[i]));
112 TEST_F(AddTest, Float)
114 Shape base_shape = {2, 3, 1, 2};
115 std::vector<Shape> test_shapes{{1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
116 std::vector<std::vector<float>> test_outputs = {
117 {0.0f, 2.6f, 0.0f, 2.8f, 0.7f, 3.2f, 1.1f, 0.8f, 0.5f, 1.0f, 1.9f, 1.4f,
118 1.0f, 0.0f, 0.4f, 0.0f, 1.8f, 0.0f, 1.4f, 3.1f, 0.8f, 3.3f, 2.2f, 3.7f,
119 0.0f, 0.3f, 0.0f, 0.5f, 0.0f, 0.9f, 0.9f, 0.0f, 0.3f, 0.0f, 1.7f, 0.0f},
120 {0.0f, 2.6f, 0.5f, 1.0f, 1.8f, 0.0f, 1.4f, 3.1f, 0.0f, 0.5f, 1.7f, 0.0f},
121 {0.0f, 2.5f, 0.0f, 2.6f, 0.0f, 1.9f, 1.1f, 0.7f, 1.2f, 0.8f, 0.5f, 0.1f,
122 1.0f, 0.0f, 1.1f, 0.0f, 0.4f, 0.0f, 1.7f, 3.3f, 2.2f, 3.8f, 2.1f, 3.7f,
123 0.0f, 0.5f, 0.0f, 1.0f, 0.0f, 0.9f, 1.2f, 0.0f, 1.7f, 0.0f, 1.6f, 0.0f},
124 {0.0f, 2.5f, 1.2f, 0.8f, 0.4f, 0.0f, 1.7f, 3.3f, 0.0f, 1.0f, 1.6f, 0.0f}};
125 std::vector<float> input1_data{-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f,
126 1.2f, 2.8f, -1.6f, 0.0f, 0.7f, -2.2f};
127 std::vector<float> input2_data{0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f};
128 for (size_t i = 0; i < test_shapes.size(); ++i)
130 Tensor input1_tensor =
131 makeInputTensor<DataType::FLOAT32>(base_shape, input1_data, _memory_manager.get());
132 Tensor input2_tensor =
133 makeInputTensor<DataType::FLOAT32>(test_shapes[i], input2_data, _memory_manager.get());
134 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
137 params.activation = Activation::RELU;
139 Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
141 _memory_manager->allocate_memory(output_tensor);
144 EXPECT_THAT(extractTensorData<float>(output_tensor), FloatArrayNear(test_outputs[i], 0.0001f))
145 << "With shape number " << i;
147 // Re-run with exchanged inputs.
148 for (size_t i = 0; i < test_shapes.size(); ++i)
150 Tensor input1_tensor =
151 makeInputTensor<DataType::FLOAT32>(test_shapes[i], input2_data, _memory_manager.get());
152 Tensor input2_tensor =
153 makeInputTensor<DataType::FLOAT32>(base_shape, input1_data, _memory_manager.get());
154 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
157 params.activation = Activation::RELU;
159 Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
161 _memory_manager->allocate_memory(output_tensor);
164 EXPECT_THAT(extractTensorData<float>(output_tensor), FloatArrayNear(test_outputs[i], 0.0001f))
165 << "With shape number " << i;
169 template <loco::DataType DType> void CheckInteger(luci_interpreter::IMemoryManager *memory_manager)
171 using dtype = typename loco::DataTypeImpl<DType>::Type;
172 Shape base_shape = {2, 3, 1, 2};
173 std::vector<Shape> test_shapes{{1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
174 std::vector<std::vector<dtype>> test_outputs = {
175 {3, 3, 0, 1, 0, 8, 5, 1, 0, 0, 2, 6, 8, 0, 1, 0, 5, 1,
176 5, 4, 0, 2, 2, 9, 11, 0, 4, 0, 8, 5, 11, 2, 4, 0, 8, 7},
177 {3, 3, 0, 0, 5, 1, 5, 4, 4, 0, 8, 7},
178 {3, 6, 0, 3, 0, 0, 5, 4, 2, 1, 0, 0, 8, 0, 5, 0, 1, 0,
179 0, 2, 2, 4, 7, 9, 6, 0, 8, 0, 13, 5, 6, 0, 8, 2, 13, 7},
180 {3, 6, 2, 1, 1, 0, 0, 2, 8, 0, 13, 7}};
181 std::vector<dtype> input1_data{-1, 2, 1, 0, 4, -5, 1, 3, 7, -1, 7, 1};
182 std::vector<dtype> input2_data{4, 1, -3, -1, 1, 6};
183 for (size_t i = 0; i < test_shapes.size(); ++i)
185 Tensor input1_tensor = makeInputTensor<DType>(base_shape, input1_data, memory_manager);
186 Tensor input2_tensor = makeInputTensor<DType>(test_shapes[i], input2_data, memory_manager);
187 Tensor output_tensor = makeOutputTensor(DType);
190 params.activation = Activation::RELU;
192 Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
194 memory_manager->allocate_memory(output_tensor);
197 EXPECT_THAT(extractTensorData<dtype>(output_tensor), test_outputs[i])
198 << "With shape number " << i;
200 // Re-run with exchanged inputs.
201 for (size_t i = 0; i < test_shapes.size(); ++i)
203 Tensor input1_tensor = makeInputTensor<DType>(test_shapes[i], input2_data, memory_manager);
204 Tensor input2_tensor = makeInputTensor<DType>(base_shape, input1_data, memory_manager);
205 Tensor output_tensor = makeOutputTensor(DType);
208 params.activation = Activation::RELU;
210 Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
212 memory_manager->allocate_memory(output_tensor);
215 EXPECT_THAT(extractTensorData<dtype>(output_tensor), test_outputs[i])
216 << "With shape number " << i;
220 TEST_F(AddTest, SInt32)
222 CheckInteger<loco::DataType::S32>(_memory_manager.get());
226 TEST_F(AddTest, SInt64)
228 CheckInteger<loco::DataType::S64>(_memory_manager.get());
232 TEST_F(AddTest, SInt16)
234 Shape base_shape = {2, 3, 1, 2};
235 std::vector<Shape> test_shapes{{1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
236 std::vector<std::vector<int32_t>> ref_output_shapes{
237 {2, 3, 3, 2}, {2, 3, 1, 2}, {2, 3, 3, 2}, {2, 3, 1, 2}};
239 std::vector<float> input1_data{-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f,
240 1.2f, 2.8f, -1.6f, 0.0f, 0.7f, -2.2f};
241 std::vector<float> input2_data{0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f};
242 std::vector<std::vector<float>> ref_outputs = {
243 {0.0f, 2.6f, 0.0f, 2.8f, 0.7f, 3.2f, 1.1f, 0.8f, 0.5f, 1.0f, 1.9f, 1.4f,
244 1.0f, 0.0f, 0.4f, 0.0f, 1.8f, 0.0f, 1.4f, 3.1f, 0.8f, 3.3f, 2.2f, 3.7f,
245 0.0f, 0.3f, 0.0f, 0.5f, 0.0f, 0.9f, 0.9f, 0.0f, 0.3f, 0.0f, 1.7f, 0.0f},
246 {0.0f, 2.6f, 0.5f, 1.0f, 1.8f, 0.0f, 1.4f, 3.1f, 0.0f, 0.5f, 1.7f, 0.0f},
247 {0.0f, 2.5f, 0.0f, 2.6f, 0.0f, 1.9f, 1.1f, 0.7f, 1.2f, 0.8f, 0.5f, 0.1f,
248 1.0f, 0.0f, 1.1f, 0.0f, 0.4f, 0.0f, 1.7f, 3.3f, 2.2f, 3.8f, 2.1f, 3.7f,
249 0.0f, 0.5f, 0.0f, 1.0f, 0.0f, 0.9f, 1.2f, 0.0f, 1.7f, 0.0f, 1.6f, 0.0f},
250 {0.0f, 2.5f, 1.2f, 0.8f, 0.4f, 0.0f, 1.7f, 3.3f, 0.0f, 1.0f, 1.6f, 0.0f}};
252 for (size_t i = 0; i < test_shapes.size(); ++i)
254 Tensor input1_tensor = makeInputTensor<DataType::S16>(base_shape, 3.0 / 32767, 0, input1_data,
255 _memory_manager.get());
256 Tensor input2_tensor = makeInputTensor<DataType::S16>(test_shapes[i], 1.0 / 32767, 0,
257 input2_data, _memory_manager.get());
258 Tensor output_tensor = makeOutputTensor(DataType::S16, 4.0 / 32767, 0);
259 const float tolerance = output_tensor.scale();
262 params.activation = Activation::RELU;
264 Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
266 _memory_manager->allocate_memory(output_tensor);
269 EXPECT_THAT(extractTensorShape(output_tensor),
270 ::testing::ElementsAreArray(ref_output_shapes[i]))
271 << "With shape number " << i;
272 EXPECT_THAT(dequantizeTensorData(output_tensor), FloatArrayNear(ref_outputs[i], tolerance))
273 << "With shape number " << i;
275 // Re-run with exchanged inputs and different scales.
276 for (size_t i = 0; i < test_shapes.size(); ++i)
278 Tensor input1_tensor = makeInputTensor<DataType::S16>(test_shapes[i], 2.0 / 32767, 0,
279 input2_data, _memory_manager.get());
280 Tensor input2_tensor = makeInputTensor<DataType::S16>(base_shape, 4.0 / 32767, 0, input1_data,
281 _memory_manager.get());
282 Tensor output_tensor = makeOutputTensor(DataType::S16, 5.0 / 32767, 0);
283 const float tolerance = output_tensor.scale();
286 params.activation = Activation::RELU;
288 Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
290 _memory_manager->allocate_memory(output_tensor);
293 EXPECT_THAT(extractTensorShape(output_tensor),
294 ::testing::ElementsAreArray(ref_output_shapes[i]))
295 << "With shape number " << i;
296 EXPECT_THAT(dequantizeTensorData(output_tensor), FloatArrayNear(ref_outputs[i], tolerance))
297 << "With shape number " << i;
301 TEST_F(AddTest, Input_Output_Type_NEG)
303 Tensor input1_tensor = makeInputTensor<DataType::FLOAT32>({1}, {1.f}, _memory_manager.get());
304 Tensor input2_tensor = makeInputTensor<DataType::S32>({1}, {2}, _memory_manager.get());
305 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
308 params.activation = Activation::RELU;
310 Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
311 EXPECT_ANY_THROW(kernel.configure());
314 TEST_F(AddTest, Invalid_Output_Type_NEG)
316 Tensor input1_tensor = makeInputTensor<DataType::S64>({1}, {1}, _memory_manager.get());
317 Tensor input2_tensor = makeInputTensor<DataType::S64>({1}, {2}, _memory_manager.get());
318 Tensor output_tensor = makeOutputTensor(DataType::S32);
321 params.activation = Activation::RELU;
323 Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
324 EXPECT_ANY_THROW(kernel.configure());
327 TEST_F(AddTest, Invalid_Input_Type_NEG)
329 Tensor input1_tensor = makeInputTensor<DataType::U64>({1}, {1}, _memory_manager.get());
330 Tensor input2_tensor = makeInputTensor<DataType::U64>({1}, {2}, _memory_manager.get());
331 Tensor output_tensor = makeOutputTensor(DataType::U64);
334 params.activation = Activation::RELU;
336 Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
338 _memory_manager->allocate_memory(output_tensor);
339 EXPECT_ANY_THROW(kernel.execute());
342 TEST_F(AddTest, Invalid_Quantization_NEG)
344 Tensor input1_tensor = makeInputTensor<DataType::S16>({1}, {1}, _memory_manager.get());
345 Tensor input2_tensor = makeInputTensor<DataType::S16>({1}, {2}, _memory_manager.get());
346 Tensor output_tensor = makeOutputTensor(DataType::S16);
349 params.activation = Activation::NONE;
351 Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
352 EXPECT_ANY_THROW(kernel.configure());
356 } // namespace kernels
357 } // namespace luci_interpreter