/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #include "kernels/Sub.h"
19 #include "kernels/TestUtils.h"
20 #include "luci_interpreter/TestMemoryManager.h"
24 namespace luci_interpreter
31 using namespace testing;
35 using std::initializer_list;
37 class SubTest : public ::testing::Test
40 void SetUp() override { _memory_manager = std::make_unique<TestMemoryManager>(); }
42 std::unique_ptr<IMemoryManager> _memory_manager;
// For quantized Sub, the absolute error of the dequantized result should not
// exceed one quantization step of the [min, max] range (256 levels for uint8,
// hence 255 steps).
// NOTE: the original comment said "Add" — a copy-paste leftover; this file tests Sub.
float GetTolerance(float min, float max)
{
  // Use a float literal to avoid a silent float -> double -> float round trip.
  const float kQuantizedStep = (max - min) / 255.0f;
  return kQuantizedStep;
}
52 TEST_F(SubTest, Uint8)
54 Shape base_shape = {2, 3, 1, 2};
55 vector<float> base_data = {-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f,
56 1.2f, 2.8f, -1.6f, 0.0f, 0.7f, -2.2f};
57 vector<Shape> test_shapes = {{1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
58 vector<float> test_data = {0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f};
59 vector<vector<int32_t>> output_shapes = {{2, 3, 3, 2}, {2, 3, 1, 2}, {2, 3, 3, 2}, {2, 3, 1, 2}};
60 vector<vector<float>> output_data = {
61 {-0.5f, 2.0f, 0.1f, 1.8f, -1.3f, 1.4f, 0.7f, 0.2f, 1.3f, 0.0f, -0.1f, -0.4f,
62 0.6f, -1.4f, 1.2f, -1.6f, -0.2f, -2.0f, 1.0f, 2.5f, 1.6f, 2.3f, 0.2f, 1.9f,
63 -1.8f, -0.3f, -1.2f, -0.5f, -2.6f, -0.9f, 0.5f, -2.5f, 1.1f, -2.7f, -0.3f, -3.0f},
64 {-0.5f, 2.0f, 1.3f, 0.0f, -0.2f, -2.0f, 1.0f, 2.5f, -1.2f, -0.5f, -0.3f, -3.0f},
65 {-0.5f, 2.1f, -0.6f, 2.0f, 0.1f, 2.7f, 0.7f, 0.3f, 0.6f, 0.2f, 1.3f, 0.9f,
66 0.6f, -1.3f, 0.5f, -1.4f, 1.2f, -0.7f, 0.7f, 2.3f, 0.2f, 1.8f, 0.3f, 1.9f,
67 -2.1f, -0.5f, -2.6f, -1.0f, -2.5f, -0.9f, 0.2f, -2.7f, -0.3f, -3.0f, -0.2f, -3.0f},
68 {-0.5f, 2.1f, 0.6f, 0.2f, 1.2f, -0.7f, 0.7f, 2.3f, -2.6f, -1.0f, -0.2f, -3.0f}};
70 float kQuantizedTolerance = GetTolerance(-3.f, 3.f);
71 pair<float, int32_t> quant_param = quantizationParams<uint8_t>(-3.f, 3.f);
72 for (size_t i = 0; i < output_data.size(); ++i)
74 Tensor input1_tensor = makeInputTensor<DataType::U8>(
75 base_shape, quant_param.first, quant_param.second, base_data, _memory_manager.get());
76 Tensor input2_tensor = makeInputTensor<DataType::U8>(
77 test_shapes[i], quant_param.first, quant_param.second, test_data, _memory_manager.get());
78 Tensor output_tensor =
79 makeOutputTensor(getElementType<uint8_t>(), quant_param.first, quant_param.second);
82 params.activation = Activation::NONE;
84 Sub kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
86 _memory_manager->allocate_memory(output_tensor);
89 EXPECT_THAT(dequantizeTensorData(output_tensor),
90 FloatArrayNear(output_data[i], kQuantizedTolerance));
91 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shapes[i]));
94 // Inversion step for output_data, because subtract is not commutative operation
95 auto multiply = [](auto &i) {
96 transform(i.begin(), i.end(), i.begin(), [](auto &value) { return value * -1.0f; });
98 for_each(output_data.begin(), output_data.end(), multiply);
100 // Re-run with exchanged inputs.
101 for (size_t i = 0; i < output_data.size(); ++i)
103 Tensor input1_tensor = makeInputTensor<DataType::U8>(
104 test_shapes[i], quant_param.first, quant_param.second, test_data, _memory_manager.get());
105 Tensor input2_tensor = makeInputTensor<DataType::U8>(
106 base_shape, quant_param.first, quant_param.second, base_data, _memory_manager.get());
107 Tensor output_tensor =
108 makeOutputTensor(getElementType<uint8_t>(), quant_param.first, quant_param.second);
111 params.activation = Activation::NONE;
113 Sub kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
115 _memory_manager->allocate_memory(output_tensor);
118 EXPECT_THAT(dequantizeTensorData(output_tensor),
119 FloatArrayNear(output_data[i], kQuantizedTolerance));
120 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shapes[i]));
124 TEST_F(SubTest, Float)
126 Shape base_shape = {2, 3, 1, 2};
127 vector<Shape> test_shapes{{1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
128 vector<vector<int32_t>> output_shapes{{2, 3, 3, 2}, {2, 3, 1, 2}, {2, 3, 3, 2}, {2, 3, 1, 2}};
129 vector<vector<float>> test_outputs = {
130 {0.0f, 2.0f, 0.1f, 1.8f, 0.0f, 1.4f, 0.7f, 0.2f, 1.3f, 0.0f, 0.0f, 0.0f,
131 0.6f, 0.0f, 1.2f, 0.0f, 0.0f, 0.0f, 1.0f, 2.5f, 1.6f, 2.3f, 0.2f, 1.9f,
132 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.5f, 0.0f, 1.1f, 0.0f, 0.0f, 0.0f},
133 {0.0f, 2.0f, 1.3f, 0.0f, 0.0f, 0.0f, 1.0f, 2.5f, 0.0f, 0.0f, 0.0f, 0.0f},
134 {0.0f, 2.1f, 0.0f, 2.0f, 0.1f, 2.7f, 0.7f, 0.3f, 0.6f, 0.2f, 1.3f, 0.9f,
135 0.6f, 0.0f, 0.5f, 0.0f, 1.2f, 0.0f, 0.7f, 2.3f, 0.2f, 1.8f, 0.3f, 1.9f,
136 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.2f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f},
137 {0.0f, 2.1f, 0.6f, 0.2f, 1.2f, 0.0f, 0.7f, 2.3f, 0.0f, 0.0f, 0.0f, 0.0f}};
139 vector<float> input1_data{-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f,
140 1.2f, 2.8f, -1.6f, 0.0f, 0.7f, -2.2f};
141 vector<float> input2_data{0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f};
142 for (size_t i = 0; i < test_shapes.size(); ++i)
144 Tensor input1_tensor =
145 makeInputTensor<DataType::FLOAT32>(base_shape, input1_data, _memory_manager.get());
146 Tensor input2_tensor =
147 makeInputTensor<DataType::FLOAT32>(test_shapes[i], input2_data, _memory_manager.get());
148 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
151 params.activation = Activation::RELU;
153 Sub kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
155 _memory_manager->allocate_memory(output_tensor);
158 EXPECT_THAT(extractTensorData<float>(output_tensor), FloatArrayNear(test_outputs[i], 0.0001f))
159 << "With shape number " << i;
161 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shapes[i]));
165 TEST_F(SubTest, Input_Output_Type_NEG)
167 Tensor input1_tensor = makeInputTensor<DataType::FLOAT32>({1}, {1.f}, _memory_manager.get());
168 Tensor input2_tensor = makeInputTensor<DataType::S32>({1}, {2}, _memory_manager.get());
169 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
172 params.activation = Activation::RELU;
174 Sub kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
175 EXPECT_ANY_THROW(kernel.configure());
178 TEST_F(SubTest, Invalid_Input_Type_NEG)
180 Tensor input1_tensor = makeInputTensor<DataType::S64>({1}, {1}, _memory_manager.get());
181 Tensor input2_tensor = makeInputTensor<DataType::S64>({1}, {2}, _memory_manager.get());
182 Tensor output_tensor = makeOutputTensor(DataType::S64);
185 params.activation = Activation::RELU;
187 Sub kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
189 _memory_manager->allocate_memory(output_tensor);
190 EXPECT_ANY_THROW(kernel.execute());
194 } // namespace kernels
195 } // namespace luci_interpreter