/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #include "kernels/Add.h"
19 #include "kernels/TestUtils.h"
20 #include "luci_interpreter/TestMemoryManager.h"
22 namespace luci_interpreter
29 using namespace testing;
31 class AddTest : public ::testing::Test
34 void SetUp() override { _memory_manager = std::make_unique<TestMemoryManager>(); }
36 std::unique_ptr<IMemoryManager> _memory_manager;
// for quantized Add, the error shouldn't exceed step
// Returns one quantization step for a uint8 quantization of [min, max],
// used as the comparison tolerance for dequantized results.
float GetTolerance(float min, float max)
{
  // 255 steps span the uint8 range; use a float literal to avoid a
  // needless float -> double -> float round trip.
  float kQuantizedStep = (max - min) / 255.0f;
  return kQuantizedStep;
}
46 TEST_F(AddTest, Uint8)
48 std::initializer_list<int32_t> base_shape = {2, 3, 1, 2};
49 std::initializer_list<float> base_data = {-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f,
50 1.2f, 2.8f, -1.6f, 0.0f, 0.7f, -2.2f};
51 std::initializer_list<int32_t> test_shapes[] = {
52 {1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
53 std::initializer_list<float> test_data = {0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f};
54 std::initializer_list<int32_t> output_shapes[] = {
55 {2, 3, 3, 2}, {2, 3, 1, 2}, {2, 3, 3, 2}, {2, 3, 1, 2}};
56 std::vector<std::vector<float>> output_data = {
57 {-0.1f, 2.6f, -0.7f, 2.8f, 0.7f, 3.0f, 1.1f, 0.8f, 0.5f, 1.0f, 1.9f, 1.4f,
58 1.0f, -0.8f, 0.4f, -0.6f, 1.8f, -0.2f, 1.4f, 3.0f, 0.8f, 3.0f, 2.2f, 3.0f,
59 -1.4f, 0.3f, -2.0f, 0.5f, -0.6f, 0.9f, 0.9f, -1.9f, 0.3f, -1.7f, 1.7f, -1.3f},
60 {-0.1f, 2.6f, 0.5f, 1.0f, 1.8f, -0.2f, 1.4f, 3.0f, -2.0f, 0.5f, 1.7f, -1.3f},
61 {-0.1f, 2.5f, 0.0f, 2.6f, -0.7f, 1.9f, 1.1f, 0.7f, 1.2f, 0.8f, 0.5f, 0.1f,
62 1.0f, -0.9f, 1.1f, -0.8f, 0.4f, -1.5f, 1.7f, 3.0f, 2.2f, 3.0f, 2.1f, 3.0f,
63 -1.1f, 0.5f, -0.6f, 1.0f, -0.7f, 0.9f, 1.2f, -1.7f, 1.7f, -1.2f, 1.6f, -1.3f},
64 {-0.1f, 2.5f, 1.2f, 0.8f, 0.4f, -1.5f, 1.7f, 3.0f, -0.6f, 1.0f, 1.6f, -1.3f}};
65 float kQuantizedTolerance = GetTolerance(-3.f, 3.f);
66 std::pair<float, int32_t> quant_param = quantizationParams<uint8_t>(-3.f, 3.f);
67 for (int i = 0; i < output_data.size(); i++)
69 Tensor input1_tensor = makeInputTensor<DataType::U8>(
70 base_shape, quant_param.first, quant_param.second, base_data, _memory_manager.get());
71 Tensor input2_tensor = makeInputTensor<DataType::U8>(
72 test_shapes[i], quant_param.first, quant_param.second, test_data, _memory_manager.get());
73 Tensor output_tensor =
74 makeOutputTensor(getElementType<uint8_t>(), quant_param.first, quant_param.second);
77 params.activation = Activation::NONE;
79 Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
81 _memory_manager->allocate_memory(output_tensor);
84 EXPECT_THAT(dequantizeTensorData(output_tensor),
85 FloatArrayNear(output_data[i], kQuantizedTolerance));
86 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shapes[i]));
88 // Re-run with exchanged inputs.
89 for (int i = 0; i < output_data.size(); i++)
91 Tensor input1_tensor = makeInputTensor<DataType::U8>(
92 test_shapes[i], quant_param.first, quant_param.second, test_data, _memory_manager.get());
93 Tensor input2_tensor = makeInputTensor<DataType::U8>(
94 base_shape, quant_param.first, quant_param.second, base_data, _memory_manager.get());
95 Tensor output_tensor =
96 makeOutputTensor(getElementType<uint8_t>(), quant_param.first, quant_param.second);
99 params.activation = Activation::NONE;
101 Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
103 _memory_manager->allocate_memory(output_tensor);
106 EXPECT_THAT(dequantizeTensorData(output_tensor),
107 FloatArrayNear(output_data[i], kQuantizedTolerance));
108 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shapes[i]));
112 TEST_F(AddTest, Float)
114 Shape base_shape = {2, 3, 1, 2};
115 std::vector<Shape> test_shapes{{1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
116 std::vector<std::vector<float>> test_outputs = {
117 {0.0f, 2.6f, 0.0f, 2.8f, 0.7f, 3.2f, 1.1f, 0.8f, 0.5f, 1.0f, 1.9f, 1.4f,
118 1.0f, 0.0f, 0.4f, 0.0f, 1.8f, 0.0f, 1.4f, 3.1f, 0.8f, 3.3f, 2.2f, 3.7f,
119 0.0f, 0.3f, 0.0f, 0.5f, 0.0f, 0.9f, 0.9f, 0.0f, 0.3f, 0.0f, 1.7f, 0.0f},
120 {0.0f, 2.6f, 0.5f, 1.0f, 1.8f, 0.0f, 1.4f, 3.1f, 0.0f, 0.5f, 1.7f, 0.0f},
121 {0.0f, 2.5f, 0.0f, 2.6f, 0.0f, 1.9f, 1.1f, 0.7f, 1.2f, 0.8f, 0.5f, 0.1f,
122 1.0f, 0.0f, 1.1f, 0.0f, 0.4f, 0.0f, 1.7f, 3.3f, 2.2f, 3.8f, 2.1f, 3.7f,
123 0.0f, 0.5f, 0.0f, 1.0f, 0.0f, 0.9f, 1.2f, 0.0f, 1.7f, 0.0f, 1.6f, 0.0f},
124 {0.0f, 2.5f, 1.2f, 0.8f, 0.4f, 0.0f, 1.7f, 3.3f, 0.0f, 1.0f, 1.6f, 0.0f}};
125 std::vector<float> input1_data{-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f,
126 1.2f, 2.8f, -1.6f, 0.0f, 0.7f, -2.2f};
127 std::vector<float> input2_data{0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f};
128 for (size_t i = 0; i < test_shapes.size(); ++i)
130 Tensor input1_tensor =
131 makeInputTensor<DataType::FLOAT32>(base_shape, input1_data, _memory_manager.get());
132 Tensor input2_tensor =
133 makeInputTensor<DataType::FLOAT32>(test_shapes[i], input2_data, _memory_manager.get());
134 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
137 params.activation = Activation::RELU;
139 Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
141 _memory_manager->allocate_memory(output_tensor);
144 EXPECT_THAT(extractTensorData<float>(output_tensor), FloatArrayNear(test_outputs[i], 0.0001f))
145 << "With shape number " << i;
147 // Re-run with exchanged inputs.
148 for (size_t i = 0; i < test_shapes.size(); ++i)
150 Tensor input1_tensor =
151 makeInputTensor<DataType::FLOAT32>(test_shapes[i], input2_data, _memory_manager.get());
152 Tensor input2_tensor =
153 makeInputTensor<DataType::FLOAT32>(base_shape, input1_data, _memory_manager.get());
154 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
157 params.activation = Activation::RELU;
159 Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
161 _memory_manager->allocate_memory(output_tensor);
164 EXPECT_THAT(extractTensorData<float>(output_tensor), FloatArrayNear(test_outputs[i], 0.0001f))
165 << "With shape number " << i;
169 TEST_F(AddTest, SInt16)
171 Shape base_shape = {2, 3, 1, 2};
172 std::vector<Shape> test_shapes{{1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
173 std::vector<std::vector<int32_t>> ref_output_shapes{
174 {2, 3, 3, 2}, {2, 3, 1, 2}, {2, 3, 3, 2}, {2, 3, 1, 2}};
176 std::vector<float> input1_data{-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f,
177 1.2f, 2.8f, -1.6f, 0.0f, 0.7f, -2.2f};
178 std::vector<float> input2_data{0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f};
179 std::vector<std::vector<float>> ref_outputs = {
180 {0.0f, 2.6f, 0.0f, 2.8f, 0.7f, 3.2f, 1.1f, 0.8f, 0.5f, 1.0f, 1.9f, 1.4f,
181 1.0f, 0.0f, 0.4f, 0.0f, 1.8f, 0.0f, 1.4f, 3.1f, 0.8f, 3.3f, 2.2f, 3.7f,
182 0.0f, 0.3f, 0.0f, 0.5f, 0.0f, 0.9f, 0.9f, 0.0f, 0.3f, 0.0f, 1.7f, 0.0f},
183 {0.0f, 2.6f, 0.5f, 1.0f, 1.8f, 0.0f, 1.4f, 3.1f, 0.0f, 0.5f, 1.7f, 0.0f},
184 {0.0f, 2.5f, 0.0f, 2.6f, 0.0f, 1.9f, 1.1f, 0.7f, 1.2f, 0.8f, 0.5f, 0.1f,
185 1.0f, 0.0f, 1.1f, 0.0f, 0.4f, 0.0f, 1.7f, 3.3f, 2.2f, 3.8f, 2.1f, 3.7f,
186 0.0f, 0.5f, 0.0f, 1.0f, 0.0f, 0.9f, 1.2f, 0.0f, 1.7f, 0.0f, 1.6f, 0.0f},
187 {0.0f, 2.5f, 1.2f, 0.8f, 0.4f, 0.0f, 1.7f, 3.3f, 0.0f, 1.0f, 1.6f, 0.0f}};
189 for (size_t i = 0; i < test_shapes.size(); ++i)
191 Tensor input1_tensor = makeInputTensor<DataType::S16>(base_shape, 3.0 / 32767, 0, input1_data,
192 _memory_manager.get());
193 Tensor input2_tensor = makeInputTensor<DataType::S16>(test_shapes[i], 1.0 / 32767, 0,
194 input2_data, _memory_manager.get());
195 Tensor output_tensor = makeOutputTensor(DataType::S16, 4.0 / 32767, 0);
196 const float tolerance = output_tensor.scale();
199 params.activation = Activation::RELU;
201 Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
203 _memory_manager->allocate_memory(output_tensor);
206 EXPECT_THAT(extractTensorShape(output_tensor),
207 ::testing::ElementsAreArray(ref_output_shapes[i]))
208 << "With shape number " << i;
209 EXPECT_THAT(dequantizeTensorData(output_tensor), FloatArrayNear(ref_outputs[i], tolerance))
210 << "With shape number " << i;
212 // Re-run with exchanged inputs and different scales.
213 for (size_t i = 0; i < test_shapes.size(); ++i)
215 Tensor input1_tensor = makeInputTensor<DataType::S16>(test_shapes[i], 2.0 / 32767, 0,
216 input2_data, _memory_manager.get());
217 Tensor input2_tensor = makeInputTensor<DataType::S16>(base_shape, 4.0 / 32767, 0, input1_data,
218 _memory_manager.get());
219 Tensor output_tensor = makeOutputTensor(DataType::S16, 5.0 / 32767, 0);
220 const float tolerance = output_tensor.scale();
223 params.activation = Activation::RELU;
225 Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
227 _memory_manager->allocate_memory(output_tensor);
230 EXPECT_THAT(extractTensorShape(output_tensor),
231 ::testing::ElementsAreArray(ref_output_shapes[i]))
232 << "With shape number " << i;
233 EXPECT_THAT(dequantizeTensorData(output_tensor), FloatArrayNear(ref_outputs[i], tolerance))
234 << "With shape number " << i;
238 TEST_F(AddTest, Input_Output_Type_NEG)
240 Tensor input1_tensor = makeInputTensor<DataType::FLOAT32>({1}, {1.f}, _memory_manager.get());
241 Tensor input2_tensor = makeInputTensor<DataType::S32>({1}, {2}, _memory_manager.get());
242 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
245 params.activation = Activation::RELU;
247 Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
248 EXPECT_ANY_THROW(kernel.configure());
251 TEST_F(AddTest, Invalid_Input_Type_NEG)
253 Tensor input1_tensor = makeInputTensor<DataType::S64>({1}, {1}, _memory_manager.get());
254 Tensor input2_tensor = makeInputTensor<DataType::S64>({1}, {2}, _memory_manager.get());
255 Tensor output_tensor = makeOutputTensor(DataType::S64);
258 params.activation = Activation::RELU;
260 Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
262 _memory_manager->allocate_memory(output_tensor);
263 EXPECT_ANY_THROW(kernel.execute());
267 } // namespace kernels
268 } // namespace luci_interpreter