/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "kernels/Sub.h"
#include "kernels/TestUtils.h"
#include "luci_interpreter/TestMemoryManager.h"

#include <algorithm>
#include <memory>
#include <utility>
#include <vector>
24 namespace luci_interpreter
31 using namespace testing;
35 using std::initializer_list;
37 class SubTest : public ::testing::Test
40 void SetUp() override { _memory_manager = std::make_unique<TestMemoryManager>(); }
42 std::unique_ptr<IMemoryManager> _memory_manager;
// For the quantized Sub test, the error shouldn't exceed one quantization step
// (uint8 quantization over [min, max] has 256 levels, i.e. 255 steps).
float GetTolerance(float min, float max)
{
  float kQuantizedStep = (max - min) / 255.0;
  return kQuantizedStep;
}
52 TEST_F(SubTest, Uint8)
54 Shape base_shape = {2, 3, 1, 2};
55 vector<float> base_data = {-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f,
56 1.2f, 2.8f, -1.6f, 0.0f, 0.7f, -2.2f};
57 vector<Shape> test_shapes = {{1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
58 vector<float> test_data = {0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f};
59 vector<vector<int32_t>> output_shapes = {{2, 3, 3, 2}, {2, 3, 1, 2}, {2, 3, 3, 2}, {2, 3, 1, 2}};
60 vector<vector<float>> output_data = {
61 {-0.5f, 2.0f, 0.1f, 1.8f, -1.3f, 1.4f, 0.7f, 0.2f, 1.3f, 0.0f, -0.1f, -0.4f,
62 0.6f, -1.4f, 1.2f, -1.6f, -0.2f, -2.0f, 1.0f, 2.5f, 1.6f, 2.3f, 0.2f, 1.9f,
63 -1.8f, -0.3f, -1.2f, -0.5f, -2.6f, -0.9f, 0.5f, -2.5f, 1.1f, -2.7f, -0.3f, -3.0f},
64 {-0.5f, 2.0f, 1.3f, 0.0f, -0.2f, -2.0f, 1.0f, 2.5f, -1.2f, -0.5f, -0.3f, -3.0f},
65 {-0.5f, 2.1f, -0.6f, 2.0f, 0.1f, 2.7f, 0.7f, 0.3f, 0.6f, 0.2f, 1.3f, 0.9f,
66 0.6f, -1.3f, 0.5f, -1.4f, 1.2f, -0.7f, 0.7f, 2.3f, 0.2f, 1.8f, 0.3f, 1.9f,
67 -2.1f, -0.5f, -2.6f, -1.0f, -2.5f, -0.9f, 0.2f, -2.7f, -0.3f, -3.0f, -0.2f, -3.0f},
68 {-0.5f, 2.1f, 0.6f, 0.2f, 1.2f, -0.7f, 0.7f, 2.3f, -2.6f, -1.0f, -0.2f, -3.0f}};
70 float kQuantizedTolerance = GetTolerance(-3.f, 3.f);
71 pair<float, int32_t> quant_param = quantizationParams<uint8_t>(-3.f, 3.f);
72 for (size_t i = 0; i < output_data.size(); ++i)
74 Tensor input1_tensor = makeInputTensor<DataType::U8>(
75 base_shape, quant_param.first, quant_param.second, base_data, _memory_manager.get());
76 Tensor input2_tensor = makeInputTensor<DataType::U8>(
77 test_shapes[i], quant_param.first, quant_param.second, test_data, _memory_manager.get());
78 Tensor output_tensor =
79 makeOutputTensor(getElementType<uint8_t>(), quant_param.first, quant_param.second);
82 params.activation = Activation::NONE;
84 Sub kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
86 _memory_manager->allocate_memory(output_tensor);
89 EXPECT_THAT(dequantizeTensorData(output_tensor),
90 FloatArrayNear(output_data[i], kQuantizedTolerance));
91 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shapes[i]));
94 // Inversion step for output_data, because subtract is not commutative operation
95 auto multiply = [](auto &i) {
96 transform(i.begin(), i.end(), i.begin(), [](auto &value) { return value * -1.0f; });
98 for_each(output_data.begin(), output_data.end(), multiply);
100 // Re-run with exchanged inputs.
101 for (size_t i = 0; i < output_data.size(); ++i)
103 Tensor input1_tensor = makeInputTensor<DataType::U8>(
104 test_shapes[i], quant_param.first, quant_param.second, test_data, _memory_manager.get());
105 Tensor input2_tensor = makeInputTensor<DataType::U8>(
106 base_shape, quant_param.first, quant_param.second, base_data, _memory_manager.get());
107 Tensor output_tensor =
108 makeOutputTensor(getElementType<uint8_t>(), quant_param.first, quant_param.second);
111 params.activation = Activation::NONE;
113 Sub kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
115 _memory_manager->allocate_memory(output_tensor);
118 EXPECT_THAT(dequantizeTensorData(output_tensor),
119 FloatArrayNear(output_data[i], kQuantizedTolerance));
120 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shapes[i]));
124 TEST_F(SubTest, Float)
126 Shape base_shape = {2, 3, 1, 2};
127 vector<Shape> test_shapes{{1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
128 vector<vector<int32_t>> output_shapes{{2, 3, 3, 2}, {2, 3, 1, 2}, {2, 3, 3, 2}, {2, 3, 1, 2}};
129 vector<vector<float>> test_outputs = {
130 {0.0f, 2.0f, 0.1f, 1.8f, 0.0f, 1.4f, 0.7f, 0.2f, 1.3f, 0.0f, 0.0f, 0.0f,
131 0.6f, 0.0f, 1.2f, 0.0f, 0.0f, 0.0f, 1.0f, 2.5f, 1.6f, 2.3f, 0.2f, 1.9f,
132 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.5f, 0.0f, 1.1f, 0.0f, 0.0f, 0.0f},
133 {0.0f, 2.0f, 1.3f, 0.0f, 0.0f, 0.0f, 1.0f, 2.5f, 0.0f, 0.0f, 0.0f, 0.0f},
134 {0.0f, 2.1f, 0.0f, 2.0f, 0.1f, 2.7f, 0.7f, 0.3f, 0.6f, 0.2f, 1.3f, 0.9f,
135 0.6f, 0.0f, 0.5f, 0.0f, 1.2f, 0.0f, 0.7f, 2.3f, 0.2f, 1.8f, 0.3f, 1.9f,
136 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.2f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f},
137 {0.0f, 2.1f, 0.6f, 0.2f, 1.2f, 0.0f, 0.7f, 2.3f, 0.0f, 0.0f, 0.0f, 0.0f}};
139 vector<float> input1_data{-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f,
140 1.2f, 2.8f, -1.6f, 0.0f, 0.7f, -2.2f};
141 vector<float> input2_data{0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f};
142 for (size_t i = 0; i < test_shapes.size(); ++i)
144 Tensor input1_tensor =
145 makeInputTensor<DataType::FLOAT32>(base_shape, input1_data, _memory_manager.get());
146 Tensor input2_tensor =
147 makeInputTensor<DataType::FLOAT32>(test_shapes[i], input2_data, _memory_manager.get());
148 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
151 params.activation = Activation::RELU;
153 Sub kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
155 _memory_manager->allocate_memory(output_tensor);
158 EXPECT_THAT(extractTensorData<float>(output_tensor), FloatArrayNear(test_outputs[i], 0.0001f))
159 << "With shape number " << i;
161 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shapes[i]));
165 template <loco::DataType DType> void CheckInteger(luci_interpreter::IMemoryManager *memory_manager)
167 using dtype = typename loco::DataTypeImpl<DType>::Type;
168 Shape base_shape = {2, 3, 1, 2};
169 std::vector<Shape> test_shapes{{1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
170 std::vector<std::vector<dtype>> test_outputs = {
171 {0, 1, 2, 3, 0, 0, 0, 0, 4, 1, 0, 0, 0, 0, 7, 0, 3, 0,
172 0, 2, 4, 4, 0, 0, 3, 0, 10, 0, 6, 0, 3, 0, 10, 2, 6, 0},
173 {0, 1, 4, 1, 3, 0, 0, 2, 10, 0, 6, 0},
174 {0, 0, 0, 1, 2, 5, 0, 0, 0, 0, 4, 3, 0, 0, 3, 0, 7, 0,
175 2, 4, 0, 2, 0, 0, 8, 0, 6, 0, 1, 0, 8, 2, 6, 0, 1, 0},
176 {0, 0, 0, 0, 7, 0, 2, 4, 6, 0, 1, 0}};
177 std::vector<dtype> input1_data{-1, 2, 1, 0, 4, -5, 1, 3, 7, -1, 7, 1};
178 std::vector<dtype> input2_data{4, 1, -3, -1, 1, 6};
179 for (size_t i = 0; i < test_shapes.size(); ++i)
181 Tensor input1_tensor = makeInputTensor<DType>(base_shape, input1_data, memory_manager);
182 Tensor input2_tensor = makeInputTensor<DType>(test_shapes[i], input2_data, memory_manager);
183 Tensor output_tensor = makeOutputTensor(DType);
186 params.activation = Activation::RELU;
188 Sub kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
190 memory_manager->allocate_memory(output_tensor);
193 EXPECT_THAT(extractTensorData<dtype>(output_tensor), test_outputs[i])
194 << "With shape number " << i;
198 TEST_F(SubTest, SInt32)
200 CheckInteger<loco::DataType::S32>(_memory_manager.get());
204 TEST_F(SubTest, SInt64)
206 CheckInteger<loco::DataType::S64>(_memory_manager.get());
210 TEST_F(SubTest, Input_Output_Type_NEG)
212 Tensor input1_tensor = makeInputTensor<DataType::FLOAT32>({1}, {1.f}, _memory_manager.get());
213 Tensor input2_tensor = makeInputTensor<DataType::S32>({1}, {2}, _memory_manager.get());
214 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
217 params.activation = Activation::RELU;
219 Sub kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
220 EXPECT_ANY_THROW(kernel.configure());
223 TEST_F(SubTest, Invalid_Output_Type_NEG)
225 Tensor input1_tensor = makeInputTensor<DataType::S64>({1}, {1}, _memory_manager.get());
226 Tensor input2_tensor = makeInputTensor<DataType::S64>({1}, {2}, _memory_manager.get());
227 Tensor output_tensor = makeOutputTensor(DataType::S32);
230 params.activation = Activation::RELU;
232 Sub kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
233 EXPECT_ANY_THROW(kernel.configure());
236 TEST_F(SubTest, Invalid_Input_Type_NEG)
238 Tensor input1_tensor = makeInputTensor<DataType::U64>({1}, {1}, _memory_manager.get());
239 Tensor input2_tensor = makeInputTensor<DataType::U64>({1}, {2}, _memory_manager.get());
240 Tensor output_tensor = makeOutputTensor(DataType::U64);
243 params.activation = Activation::RELU;
245 Sub kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
247 _memory_manager->allocate_memory(output_tensor);
248 EXPECT_ANY_THROW(kernel.execute());
251 TEST_F(SubTest, Mismatching_Input_Int_Types_NEG)
253 Tensor input1_tensor = makeInputTensor<DataType::S32>({1}, {1}, _memory_manager.get());
254 Tensor input2_tensor = makeInputTensor<DataType::S64>({1}, {2}, _memory_manager.get());
255 Tensor output_tensor = makeOutputTensor(DataType::S32);
258 params.activation = Activation::NONE;
260 Sub kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
261 EXPECT_ANY_THROW(kernel.configure());
265 } // namespace kernels
266 } // namespace luci_interpreter